main.aux

\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{khot2011grothendieck,lovasz2003semidefinite,zhao1998semidefinite}
\citation{mossel2015consistency,song2007dependence}
\citation{singer2011angular,singer2011three}
\citation{raghavendra2008optimal}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}\protected@file@percent }
\newlabel{intro}{{1}{1}{Introduction}{section.1}{}}
\newlabel{prob:01}{{1}{1}{Introduction}{equation.1.1}{}}
\@writefile{toc}{\contentsline {paragraph}{Vignette: Burer-Monteiro splitting.}{1}{section*.1}\protected@file@percent }
\newlabel{eq:sdp}{{2}{1}{Vignette: Burer-Monteiro splitting}{equation.1.2}{}}
\citation{yurtsever2018conditional}
\citation{burer2003nonlinear,burer2005local}
\citation{pataki1998rank,barvinok1995problems}
\citation{waldspurger2018rank}
\citation{burer2005local}
\citation{boumal2016non}
\citation{burer2003nonlinear,burer2005local,kulis2007fast}
\citation{parikh2014proximal}
\citation{nocedal2006numerical}
\citation{nedelcu2014computational,lan2016iteration,xu2017inexact}
\newlabel{prob:nc}{{3}{2}{Vignette: Burer-Monteiro splitting}{equation.1.3}{}}
\newlabel{sec:preliminaries}{{2}{2}{Preliminaries}{section.2}{}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Preliminaries }{2}{section.2}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{\textbf {{Notation.}}}{2}{section*.2}\protected@file@percent }
\citation{hestenes1969multiplier,powell1969method}
\citation{bertsekas1982constrained,birgin2014practical}
\newlabel{eq:indicator}{{4}{3}{\textbf {{Notation.}}}{equation.2.4}{}}
\newlabel{eq:smoothness basic}{{5}{3}{\textbf {{Notation.}}}{equation.2.5}{}}
\newlabel{eq:minmax}{{6}{3}{\textbf {{Notation.}}}{equation.2.6}{}}
\newlabel{eq:Lagrangian}{{7}{3}{\textbf {{Notation.}}}{equation.2.7}{}}
\newlabel{e:exac}{{8}{3}{\textbf {{Notation.}}}{equation.2.8}{}}
\newlabel{sec:opt cnds}{{2}{3}{\textbf {{Notation.}}}{equation.2.8}{}}
\newlabel{e:inclu1}{{9}{3}{\textbf {{Notation.}}}{equation.2.9}{}}
\newlabel{e:inclu2}{{10}{3}{\textbf {{Notation.}}}{equation.2.10}{}}
\newlabel{eq:inclu3}{{11}{3}{\textbf {{Notation.}}}{equation.2.11}{}}
\newlabel{eq:cvg metric}{{12}{3}{\textbf {{Notation.}}}{equation.2.12}{}}
\citation{bertsekas1976penalty}
\newlabel{eq:dist_subgrad}{{13}{4}{Vignette: Burer-Monteiro splitting}{equation.2.13}{}}
\newlabel{eq:sec_opt}{{15}{4}{Vignette: Burer-Monteiro splitting}{equation.2.15}{}}
\@writefile{toc}{\contentsline {paragraph}{{\textbf {Smoothness lemma.}}}{4}{section*.3}\protected@file@percent }
\newlabel{lem:smoothness}{{2.1}{4}{\textbf {smoothness}}{theorem.2.1}{}}
\newlabel{eq:smoothness of Lagrangian}{{17}{4}{\textbf {smoothness}}{equation.2.17}{}}
\newlabel{sec:AL algorithm}{{3}{4}{Algorithm}{section.3}{}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Algorithm }{4}{section.3}\protected@file@percent }
\newlabel{sec:cvg rate}{{4}{4}{Convergence Rate}{section.4}{}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Convergence Rate }{4}{section.4}\protected@file@percent }
\@writefile{loa}{\contentsline {algorithm}{\numberline {1}{\ignorespaces Inexact ALM for solving\nobreakspace {}\textup {\hbox {\mathsurround \z@ \normalfont (\ignorespaces \ref {prob:01}\unskip \@@italiccorr )}}}}{5}{algorithm.1}\protected@file@percent }
\newlabel{Algo:2}{{1}{5}{Algorithm}{algorithm.1}{}}
\newlabel{thm:main}{{4.1}{5}{Convergence Rate}{theorem.4.1}{}}
\newlabel{eq:defn_restricted_lipsichtz}{{19}{5}{Convergence Rate}{equation.4.19}{}}
\newlabel{eq:regularity}{{20}{5}{Convergence Rate}{equation.4.20}{}}
\newlabel{eq:stat_prec_first}{{21}{5}{Convergence Rate}{equation.4.21}{}}
\citation{karimi2016linear}
\citation{xu2017globally}
\citation{bolte2018nonconvex}
\citation{bolte2018nonconvex}
\citation{flores2012complete}
\citation{ghadimi2016accelerated}
\citation{cartis2012complexity}
\citation{ghadimi2016accelerated}
\citation{nesterov1983method}
\citation{ghadimi2016accelerated}
\@writefile{toc}{\contentsline {paragraph}{Penalty method.}{6}{section*.4}\protected@file@percent }
\newlabel{sec:first-o-opt}{{4.1}{6}{First-Order Optimality}{subsection.4.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}First-Order Optimality }{6}{subsection.4.1}\protected@file@percent }
\citation{ghadimi2016accelerated}
\citation{ghadimi2016accelerated}
\citation{cartis2012complexity}
\citation{nouiehed2018convergence}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\newlabel{eq:iter_1storder}{{24}{7}{First-Order Optimality}{equation.4.24}{}}
\newlabel{cor:first}{{4.2}{7}{First-Order Optimality}{theorem.4.2}{}}
\newlabel{sec:second-o-opt}{{4.2}{7}{Second-Order Optimality}{subsection.4.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Second-Order Optimality }{7}{subsection.4.2}\protected@file@percent }
\newlabel{eq:sec_inn_comp}{{26}{7}{Second-Order Optimality}{equation.4.26}{}}
\newlabel{cor:second}{{4.3}{7}{Second-Order Optimality}{theorem.4.3}{}}
\citation{hestenes1969multiplier,powell1969method}
\citation{lan2016iteration,nedelcu2014computational,tran2018adaptive,xu2017inexact}
\citation{bertsekas2014constrained}
\citation{fernandez2012local}
\citation{birgin2016evaluation}
\citation{cartis2018optimality}
\citation{birgin2016evaluation,cartis2018optimality}
\citation{cartis2011evaluation}
\citation{bolte2018nonconvex}
\citation{bolte2017error}
\citation{clason2018acceleration}
\citation{chambolle2011first}
\citation{burer2003nonlinear,burer2005local}
\citation{bhojanapalli2016dropping,park2016provable}
\citation{bhojanapalli2018smoothed}
\citation{boumal2014manopt,boumal2016global,boumal2016non}
\newlabel{sec:related work}{{5}{8}{Related Work}{section.5}{}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Related Work }{8}{section.5}\protected@file@percent }
\newlabel{eq:pen_sdp}{{29}{8}{Related Work}{equation.5.29}{}}
\newlabel{eq:pen_nc}{{30}{8}{Related Work}{equation.5.30}{}}
\citation{boumal2016global}
\citation{nesterov2009primal,yurtsever2015universal,yurtsever2018conditional}
\citation{jaggi2013revisiting}
\citation{dai2002convergence,mascarenhas2004bfgs}
\citation{cartis2012complexity}
\citation{Peng2007}
\citation{kulis2007fast}
\citation{fletcher2013practical}
\newlabel{sec:experiments}{{6}{9}{Numerical Evidence}{section.6}{}}
\@writefile{toc}{\contentsline {section}{\numberline {6}Numerical Evidence }{9}{section.6}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Clustering}{9}{subsection.6.1}\protected@file@percent }
\newlabel{eq:sdp_svx}{{31}{9}{Clustering}{equation.6.31}{}}
\newlabel{eq:nc_cluster}{{32}{9}{Clustering}{equation.6.32}{}}
\citation{yurtsever2018conditional}
\citation{yang2015sdpnal}
\citation{mixon2016clustering}
\citation{xiao2017/online}
\citation{kulis2007fast}
\citation{tepper2018clustering}
\citation{kulis2007fast}
\citation{tepper2018clustering}
\citation{chen2001atomic,candes2008introduction,arora2018compressed}
\citation{tran2018adaptive,chambolle2011first}
\citation{beck2009fast}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Convergence of different algorithms for clustering with the fashion-MNIST dataset. Here, we set the rank as $r=20$ for the nonconvex approaches. The solution rank for the template\nobreakspace {}\textup {\hbox {\mathsurround \z@ \normalfont (\ignorespaces \ref {eq:sdp_svx}\unskip \@@italiccorr )}} is the number of clusters $s$ \cite [Theorem 1]{kulis2007fast}. However, as discussed in\nobreakspace {}\cite {tepper2018clustering}, setting the rank $r>s$ leads to more accurate reconstruction at the expense of speed, hence our choice of $r=20$. }}{10}{figure.1}\protected@file@percent }
\newlabel{fig:clustering}{{1}{10}{Convergence of different algorithms for clustering with the fashion-MNIST dataset. Here, we set the rank as $r=20$ for the nonconvex approaches. The solution rank for the template~\eqref {eq:sdp_svx} is the number of clusters $s$ \cite [Theorem 1]{kulis2007fast}. However, as discussed in~\cite {tepper2018clustering}, setting the rank $r>s$ leads to more accurate reconstruction at the expense of speed, hence our choice of $r=20$}{figure.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Basis Pursuit}{10}{subsection.6.2}\protected@file@percent }
\newlabel{eq:bp_main}{{34}{10}{Basis Pursuit}{equation.6.34}{}}
\newlabel{eq:bp-equiv}{{35}{10}{Basis Pursuit}{equation.6.35}{}}
\citation{obozinski2011group}
\bibstyle{abbrv}
\bibdata{bibliography.bib}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Convergence with different subsolvers for the aforementioned nonconvex relaxation. }}{11}{figure.2}\protected@file@percent }
\newlabel{fig:bp1}{{2}{11}{Convergence with different subsolvers for the aforementioned nonconvex relaxation}{figure.2}{}}
\@writefile{toc}{\contentsline {paragraph}{Discussion:}{11}{section*.5}\protected@file@percent }
\newlabel{sec:theory}{{A}{12}{Proof of Theorem \ref {thm:main}}{appendix.A}{}}
\@writefile{toc}{\contentsline {section}{\numberline {A}Proof of Theorem \ref {thm:main} }{12}{appendix.A}\protected@file@percent }
\newlabel{eq:before_restriction}{{38}{12}{Proof of Theorem \ref {thm:main}}{equation.A.38}{}}
\newlabel{eq:restrited_pre}{{39}{12}{Proof of Theorem \ref {thm:main}}{equation.A.39}{}}
\newlabel{eq:before_dual_controlled}{{40}{12}{Proof of Theorem \ref {thm:main}}{equation.A.40}{}}
\newlabel{eq:dual growth}{{41}{12}{Proof of Theorem \ref {thm:main}}{equation.A.41}{}}
\newlabel{eq:cvg metric part 2}{{43}{12}{Proof of Theorem \ref {thm:main}}{equation.A.43}{}}
\newlabel{eq:cvg metric part 1 brk down}{{44}{13}{Proof of Theorem \ref {thm:main}}{equation.A.44}{}}
\newlabel{eq:part_1_2}{{45}{13}{Proof of Theorem \ref {thm:main}}{equation.A.45}{}}
\newlabel{eq:cvg metric part 1}{{46}{13}{Proof of Theorem \ref {thm:main}}{equation.A.46}{}}
\newlabel{eq:sec}{{49}{13}{Proof of Theorem \ref {thm:main}}{equation.A.49}{}}
\@writefile{toc}{\contentsline {section}{\numberline {B}Proof of Corollary\nobreakspace {}\ref {cor:first}}{14}{appendix.B}\protected@file@percent }
\newlabel{eq:acc_to_b}{{50}{14}{Proof of Corollary~\ref {cor:first}}{equation.B.50}{}}
\newlabel{eq: tk_bound}{{51}{14}{Proof of Corollary~\ref {cor:first}}{equation.B.51}{}}
\newlabel{sec:proof of smoothness lemma}{{C}{14}{Proof of Lemma \ref {lem:smoothness}}{appendix.C}{}}
\@writefile{toc}{\contentsline {section}{\numberline {C}Proof of Lemma \ref {lem:smoothness}}{14}{appendix.C}\protected@file@percent }
\newlabel{sec:verification2}{{D}{15}{Clustering}{appendix.D}{}}
\@writefile{toc}{\contentsline {section}{\numberline {D}Clustering }{15}{appendix.D}\protected@file@percent }
\newlabel{eq:Jacobian clustering}{{59}{15}{Clustering}{equation.D.59}{}}
\newlabel{eq:exp-subgrad-cluster}{{61}{15}{Clustering}{equation.D.61}{}}
\citation{obozinski2011group}
\newlabel{sec:verification1}{{E}{16}{Basis Pursuit}{appendix.E}{}}
\@writefile{toc}{\contentsline {section}{\numberline {E}Basis Pursuit }{16}{appendix.E}\protected@file@percent }
\newlabel{eq:jacob-bp}{{63}{16}{Basis Pursuit}{equation.E.63}{}}
\newlabel{eq:cnd-bp-pre}{{64}{16}{Basis Pursuit}{equation.E.64}{}}
\@writefile{toc}{\contentsline {paragraph}{Discussion}{16}{section*.7}\protected@file@percent }
\citation{Ilyas2017}
\citation{Goodfellow2014}
\citation{ge2016efficient}
\citation{boumal2016non}
\citation{boumal2016global}
\citation{ge2016efficient}
\citation{manopt}
\@writefile{toc}{\contentsline {subsection}{\numberline {E.1}$\ell _\infty $ Denoising with a Generative Prior}{17}{subsection.E.1}\protected@file@percent }
\newlabel{fig:comparison_fab}{{E.1}{17}{$\ell _\infty $ Denoising with a Generative Prior}{equation.E.67}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Augmented Lagrangian vs Adam for $\ell _\infty $ denoising (left). $\ell _2$ vs $\ell _\infty $ denoising as a defense against adversarial examples. }}{17}{figure.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {E.2}Generalized Eigenvalue Problem}{17}{subsection.E.2}\protected@file@percent }
\newlabel{eq:eig}{{68}{17}{Generalized Eigenvalue Problem}{equation.E.68}{}}
\newlabel{eq:eig-sdp}{{69}{17}{Generalized Eigenvalue Problem}{equation.E.69}{}}
\newlabel{eq:eig-equiv}{{70}{17}{Generalized Eigenvalue Problem}{equation.E.70}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces {\it {(Top)}} Objective convergence for calculating top generalized eigenvalue and eigenvector of $B$ and $C$. {\it {(Bottom)}} Eigenvalue structure of the matrices. For (i),(ii) and (iii), $C$ is positive semidefinite; for (iv), (v) and (vi), $C$ contains negative eigenvalues. {[(i): Generated by taking symmetric part of iid Gaussian matrix. (ii): Generated by randomly rotating diag($1^{-p}, 2^{-p}, \cdots , 1000^{-p}$)($p=1$). (iii): Generated by randomly rotating diag($10^{-p}, 10^{-2p}, \cdots , 10^{-1000p}$)($p=0.0025$).]} }}{18}{figure.4}\protected@file@percent }
\newlabel{fig:geig1}{{4}{18}{{\it {(Top)}} Objective convergence for calculating top generalized eigenvalue and eigenvector of $B$ and $C$. {\it {(Bottom)}} Eigenvalue structure of the matrices. For (i),(ii) and (iii), $C$ is positive semidefinite; for (iv), (v) and (vi), $C$ contains negative eigenvalues. {[(i): Generated by taking symmetric part of iid Gaussian matrix. (ii): Generated by randomly rotating diag($1^{-p}, 2^{-p}, \cdots , 1000^{-p}$)($p=1$). (iii): Generated by randomly rotating diag($10^{-p}, 10^{-2p}, \cdots , 10^{-1000p}$)($p=0.0025$).]}}{figure.4}{}}