main.aux

\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{parikh2014proximal}
\citation{khot2011grothendieck,lovasz2003semidefinite,zhao1998semidefinite}
\citation{mossel2015consistency,song2007dependence}
\citation{singer2011angular,singer2011three}
\citation{zhao1998semidefinite}
\citation{raghavendra2008optimal}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}}
\newlabel{intro}{{1}{1}{Introduction}{section.1}{}}
\newlabel{prob:01}{{1}{1}{Introduction}{equation.1.1}{}}
\@writefile{toc}{\contentsline {paragraph}{Vignette: Burer-Monteiro splitting.}{1}{section*.1}}
\newlabel{eq:sdp}{{2}{1}{Vignette: Burer-Monteiro splitting}{equation.1.2}{}}
\citation{yurtsever2018conditional}
\citation{burer2003nonlinear,burer2005local}
\citation{pataki1998rank,barvinok1995problems}
\citation{waldspurger2018rank}
\citation{burer2005local}
\citation{boumal2016non}
\citation{burer2003nonlinear,burer2005local,kulis2007fast}
\citation{parikh2014proximal}
\citation{nocedal2006numerical}
\citation{nedelcu2014computational,lan2016iteration,xu2017inexact}
\citation{karimi2016linear}
\citation{bertsekas1982constrained}
\newlabel{prob:nc}{{3}{2}{Vignette: Burer-Monteiro splitting}{equation.1.3}{}}
\newlabel{sec:preliminaries}{{2}{2}{Preliminaries}{section.2}{}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Preliminaries }{2}{section.2}}
\@writefile{toc}{\contentsline {paragraph}{\textbf {{Notation.}}}{2}{section*.2}}
\citation{hestenes1969multiplier,powell1969method}
\citation{bertsekas1982constrained,birgin2014practical}
\newlabel{eq:indicator}{{4}{3}{\textbf {{Notation.}}}{equation.2.4}{}}
\newlabel{eq:smoothness basic}{{5}{3}{\textbf {{Notation.}}}{equation.2.5}{}}
\newlabel{eq:minmax}{{6}{3}{\textbf {{Notation.}}}{equation.2.6}{}}
\newlabel{eq:Lagrangian}{{7}{3}{\textbf {{Notation.}}}{equation.2.7}{}}
\newlabel{e:exac}{{8}{3}{\textbf {{Notation.}}}{equation.2.8}{}}
\newlabel{sec:opt cnds}{{2}{3}{\textbf {{Notation.}}}{equation.2.8}{}}
\newlabel{e:inclu1}{{9}{3}{\textbf {{Notation.}}}{equation.2.9}{}}
\newlabel{e:inclu2}{{10}{3}{\textbf {{Notation.}}}{equation.2.10}{}}
\newlabel{eq:inclu3}{{11}{3}{\textbf {{Notation.}}}{equation.2.11}{}}
\newlabel{eq:cvg metric}{{12}{3}{\textbf {{Notation.}}}{equation.2.12}{}}
\citation{bertsekas1976penalty}
\newlabel{eq:dist_subgrad}{{13}{4}{Vignette: Burer-Monteiro splitting}{equation.2.13}{}}
\newlabel{eq:sec_opt}{{15}{4}{Vignette: Burer-Monteiro splitting}{equation.2.15}{}}
\@writefile{toc}{\contentsline {paragraph}{{\textbf {Smoothness lemma.}}}{4}{section*.3}}
\newlabel{lem:smoothness}{{2.1}{4}{\textbf {smoothness}}{theorem.2.1}{}}
\newlabel{eq:smoothness of Lagrangian}{{17}{4}{\textbf {smoothness}}{equation.2.17}{}}
\newlabel{sec:AL algorithm}{{3}{4}{Algorithm}{section.3}{}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Algorithm }{4}{section.3}}
\newlabel{sec:cvg rate}{{4}{4}{Convergence Rate}{section.4}{}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Convergence Rate }{4}{section.4}}
\@writefile{loa}{\contentsline {algorithm}{\numberline {1}{\ignorespaces Inexact ALM for solving\nobreakspace {}\textup {\hbox {\mathsurround \z@ \normalfont (\ignorespaces \ref {prob:01}\unskip \@@italiccorr )}}}}{5}{algorithm.1}}
\newlabel{Algo:2}{{1}{5}{Algorithm}{algorithm.1}{}}
\newlabel{thm:main}{{4.1}{5}{Convergence Rate}{theorem.4.1}{}}
\newlabel{eq:defn_restricted_lipsichtz}{{19}{5}{Convergence Rate}{equation.4.19}{}}
\newlabel{eq:regularity}{{20}{5}{Convergence Rate}{equation.4.20}{}}
\newlabel{eq:stat_prec_first}{{21}{5}{Convergence Rate}{equation.4.21}{}}
\citation{karimi2016linear}
\citation{xu2017globally}
\citation{bolte2018nonconvex}
\citation{rockafellar1993lagrange}
\citation{bolte2018nonconvex}
\citation{flores2012complete}
\citation{ghadimi2016accelerated}
\citation{cartis2012complexity}
\citation{ghadimi2016accelerated}
\citation{nesterov1983method}
\@writefile{toc}{\contentsline {paragraph}{Penalty method.}{6}{section*.4}}
\newlabel{sec:first-o-opt}{{4.1}{6}{First-Order Optimality}{subsection.4.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}First-Order Optimality }{6}{subsection.4.1}}
\citation{ghadimi2016accelerated}
\citation{ghadimi2016accelerated}
\citation{ghadimi2016accelerated}
\citation{cartis2012complexity}
\citation{nouiehed2018convergence}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\citation{cartis2012complexity}
\newlabel{eq:iter_1storder}{{24}{7}{First-Order Optimality}{equation.4.24}{}}
\newlabel{cor:first}{{4.2}{7}{First-Order Optimality}{theorem.4.2}{}}
\newlabel{sec:second-o-opt}{{4.2}{7}{Second-Order Optimality}{subsection.4.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Second-Order Optimality }{7}{subsection.4.2}}
\newlabel{eq:sec_inn_comp}{{26}{7}{Second-Order Optimality}{equation.4.26}{}}
\newlabel{cor:second}{{4.3}{7}{Second-Order Optimality}{theorem.4.3}{}}
\citation{hestenes1969multiplier,powell1969method}
\citation{lan2016iteration,nedelcu2014computational,tran2018adaptive,xu2017inexact}
\citation{bertsekas2014constrained}
\citation{fernandez2012local}
\citation{birgin2016evaluation}
\citation{cartis2018optimality}
\citation{birgin2016evaluation,cartis2018optimality}
\citation{cartis2011evaluation}
\citation{bolte2018nonconvex}
\citation{bolte2017error}
\citation{clason2018acceleration}
\citation{chambolle2011first}
\citation{burer2003nonlinear,burer2005local}
\citation{bhojanapalli2016dropping,park2016provable}
\citation{bhojanapalli2018smoothed}
\newlabel{sec:related work}{{5}{8}{Related Work}{section.5}{}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Related Work }{8}{section.5}}
\newlabel{eq:pen_sdp}{{29}{8}{Related Work}{equation.5.29}{}}
\newlabel{eq:pen_nc}{{30}{8}{Related Work}{equation.5.30}{}}
\citation{boumal2014manopt,boumal2016global,boumal2016non}
\citation{boumal2016global}
\citation{nesterov2009primal,yurtsever2015universal,yurtsever2018conditional}
\citation{jaggi2013revisiting}
\citation{dai2002convergence,mascarenhas2004bfgs}
\citation{cartis2012complexity}
\citation{Peng2007}
\citation{kulis2007fast}
\newlabel{sec:experiments}{{6}{9}{Numerical Evidence}{section.6}{}}
\@writefile{toc}{\contentsline {section}{\numberline {6}Numerical Evidence }{9}{section.6}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Clustering}{9}{subsection.6.1}}
\newlabel{eq:sdp_svx}{{31}{9}{Clustering}{equation.6.31}{}}
\newlabel{eq:nc_cluster}{{32}{9}{Clustering}{equation.6.32}{}}
\citation{fletcher2013practical}
\citation{yurtsever2018conditional}
\citation{yang2015sdpnal}
\citation{mixon2016clustering}
\citation{xiao2017/online}
\citation{kulis2007fast}
\citation{tepper2018clustering}
\citation{kulis2007fast}
\citation{tepper2018clustering}
\citation{chen2001atomic,candes2008introduction,arora2018compressed}
\citation{tran2018adaptive,chambolle2011first}
\citation{chambolle2011first}
\citation{tran2018smooth}
\citation{tran2018adaptive}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Convergence of different algorithms for k-means clustering on the Fashion-MNIST dataset. The solution rank for the template\nobreakspace {}\textup {\hbox {\mathsurround \z@ \normalfont (\ignorespaces \ref {eq:sdp_svx}\unskip \@@italiccorr )}} is known and equals the number of clusters $k$ (Theorem\nobreakspace {}1 of \cite {kulis2007fast}). As discussed in\nobreakspace {}\cite {tepper2018clustering}, setting the rank $r>k$ leads to more accurate reconstruction at the expense of speed; we therefore set the rank to 20.}}{10}{figure.1}}
\newlabel{fig:clustering}{{1}{10}{Convergence of different algorithms for k-means clustering on the Fashion-MNIST dataset. The solution rank for the template~\eqref {eq:sdp_svx} is known and equals the number of clusters $k$ (Theorem~1 of \cite {kulis2007fast}). As discussed in~\cite {tepper2018clustering}, setting the rank $r>k$ leads to more accurate reconstruction at the expense of speed; we therefore set the rank to 20}{figure.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Basis Pursuit}{10}{subsection.6.2}}
\newlabel{eq:bp_main}{{34}{10}{Basis Pursuit}{equation.6.34}{}}
\citation{obozinski2011group}
\bibstyle{abbrv}
\bibdata{bibliography.bib}
\bibcite{arora2018compressed}{{1}{}{{}}{{}}}
\bibcite{barvinok1995problems}{{2}{}{{}}{{}}}
\bibcite{bertsekas1976penalty}{{3}{}{{}}{{}}}
\bibcite{bertsekas1982constrained}{{4}{}{{}}{{}}}
\newlabel{eq:bp-equiv}{{35}{11}{Basis Pursuit}{equation.6.35}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Convergence with different subsolvers for the aforementioned nonconvex relaxation. }}{11}{figure.2}}
\newlabel{fig:bp1}{{2}{11}{Convergence with different subsolvers for the aforementioned nonconvex relaxation}{figure.2}{}}
\@writefile{toc}{\contentsline {paragraph}{Discussion:}{11}{section*.5}}
\bibcite{bertsekas2014constrained}{{5}{}{{}}{{}}}
\bibcite{bhojanapalli2018smoothed}{{6}{}{{}}{{}}}
\bibcite{bhojanapalli2016dropping}{{7}{}{{}}{{}}}
\bibcite{birgin2016evaluation}{{8}{}{{}}{{}}}
\bibcite{birgin2014practical}{{9}{}{{}}{{}}}
\bibcite{bolte2017error}{{10}{}{{}}{{}}}
\bibcite{bolte2018nonconvex}{{11}{}{{}}{{}}}
\bibcite{boumal2016global}{{12}{}{{}}{{}}}
\bibcite{boumal2014manopt}{{13}{}{{}}{{}}}
\bibcite{boumal2016non}{{14}{}{{}}{{}}}
\bibcite{burer2003nonlinear}{{15}{}{{}}{{}}}
\bibcite{burer2005local}{{16}{}{{}}{{}}}
\bibcite{candes2008introduction}{{17}{}{{}}{{}}}
\bibcite{cartis2011evaluation}{{18}{}{{}}{{}}}
\bibcite{cartis2012complexity}{{19}{}{{}}{{}}}
\bibcite{cartis2018optimality}{{20}{}{{}}{{}}}
\bibcite{chambolle2011first}{{21}{}{{}}{{}}}
\bibcite{chen2001atomic}{{22}{}{{}}{{}}}
\bibcite{clason2018acceleration}{{23}{}{{}}{{}}}
\bibcite{dai2002convergence}{{24}{}{{}}{{}}}
\bibcite{fernandez2012local}{{25}{}{{}}{{}}}
\bibcite{fletcher2013practical}{{26}{}{{}}{{}}}
\bibcite{flores2012complete}{{27}{}{{}}{{}}}
\bibcite{ge2016efficient}{{28}{}{{}}{{}}}
\bibcite{ghadimi2016accelerated}{{29}{}{{}}{{}}}
\bibcite{Goodfellow2014}{{30}{}{{}}{{}}}
\bibcite{hestenes1969multiplier}{{31}{}{{}}{{}}}
\bibcite{Ilyas2017}{{32}{}{{}}{{}}}
\bibcite{jaggi2013revisiting}{{33}{}{{}}{{}}}
\bibcite{karimi2016linear}{{34}{}{{}}{{}}}
\bibcite{khot2011grothendieck}{{35}{}{{}}{{}}}
\bibcite{kulis2007fast}{{36}{}{{}}{{}}}
\bibcite{lan2016iteration}{{37}{}{{}}{{}}}
\bibcite{lovasz2003semidefinite}{{38}{}{{}}{{}}}
\bibcite{mascarenhas2004bfgs}{{39}{}{{}}{{}}}
\bibcite{mixon2016clustering}{{40}{}{{}}{{}}}
\bibcite{mossel2015consistency}{{41}{}{{}}{{}}}
\bibcite{nedelcu2014computational}{{42}{}{{}}{{}}}
\bibcite{nesterov2009primal}{{43}{}{{}}{{}}}
\bibcite{nesterov1983method}{{44}{}{{}}{{}}}
\bibcite{nocedal2006numerical}{{45}{}{{}}{{}}}
\bibcite{nouiehed2018convergence}{{46}{}{{}}{{}}}
\bibcite{obozinski2011group}{{47}{}{{}}{{}}}
\bibcite{parikh2014proximal}{{48}{}{{}}{{}}}
\bibcite{park2016provable}{{49}{}{{}}{{}}}
\bibcite{pataki1998rank}{{50}{}{{}}{{}}}
\bibcite{Peng2007}{{51}{}{{}}{{}}}
\bibcite{powell1969method}{{52}{}{{}}{{}}}
\bibcite{raghavendra2008optimal}{{53}{}{{}}{{}}}
\bibcite{rockafellar1993lagrange}{{54}{}{{}}{{}}}
\bibcite{singer2011angular}{{55}{}{{}}{{}}}
\bibcite{singer2011three}{{56}{}{{}}{{}}}
\bibcite{song2007dependence}{{57}{}{{}}{{}}}
\bibcite{tepper2018clustering}{{58}{}{{}}{{}}}
\bibcite{tran2018adaptive}{{59}{}{{}}{{}}}
\bibcite{tran2018smooth}{{60}{}{{}}{{}}}
\bibcite{waldspurger2018rank}{{61}{}{{}}{{}}}
\bibcite{xiao2017/online}{{62}{}{{}}{{}}}
\bibcite{xu2017inexact}{{63}{}{{}}{{}}}
\bibcite{xu2017globally}{{64}{}{{}}{{}}}
\bibcite{yang2015sdpnal}{{65}{}{{}}{{}}}
\bibcite{yurtsever2015universal}{{66}{}{{}}{{}}}
\bibcite{yurtsever2018conditional}{{67}{}{{}}{{}}}
\bibcite{zhao1998semidefinite}{{68}{}{{}}{{}}}
\newlabel{sec:theory}{{A}{16}{Proof of Theorem \ref {thm:main}}{appendix.A}{}}
\@writefile{toc}{\contentsline {section}{\numberline {A}Proof of Theorem \ref {thm:main} }{16}{appendix.A}}
\newlabel{eq:before_restriction}{{38}{16}{Proof of Theorem \ref {thm:main}}{equation.A.38}{}}
\newlabel{eq:restrited_pre}{{39}{16}{Proof of Theorem \ref {thm:main}}{equation.A.39}{}}
\newlabel{eq:before_dual_controlled}{{40}{16}{Proof of Theorem \ref {thm:main}}{equation.A.40}{}}
\newlabel{eq:dual growth}{{41}{16}{Proof of Theorem \ref {thm:main}}{equation.A.41}{}}
\newlabel{eq:cvg metric part 2}{{43}{16}{Proof of Theorem \ref {thm:main}}{equation.A.43}{}}
\newlabel{eq:cvg metric part 1 brk down}{{44}{17}{Proof of Theorem \ref {thm:main}}{equation.A.44}{}}
\newlabel{eq:part_1_2}{{45}{17}{Proof of Theorem \ref {thm:main}}{equation.A.45}{}}
\newlabel{eq:cvg metric part 1}{{46}{17}{Proof of Theorem \ref {thm:main}}{equation.A.46}{}}
\newlabel{eq:sec}{{49}{17}{Proof of Theorem \ref {thm:main}}{equation.A.49}{}}
\@writefile{toc}{\contentsline {section}{\numberline {B}Proof of Corollary\nobreakspace {}\ref {cor:first}}{18}{appendix.B}}
\newlabel{eq:acc_to_b}{{50}{18}{Proof of Corollary~\ref {cor:first}}{equation.B.50}{}}
\newlabel{eq: tk_bound}{{51}{18}{Proof of Corollary~\ref {cor:first}}{equation.B.51}{}}
\newlabel{sec:proof of smoothness lemma}{{C}{18}{Proof of Lemma \ref {lem:smoothness}}{appendix.C}{}}
\@writefile{toc}{\contentsline {section}{\numberline {C}Proof of Lemma \ref {lem:smoothness}}{18}{appendix.C}}
\newlabel{sec:verification2}{{D}{19}{Clustering}{appendix.D}{}}
\@writefile{toc}{\contentsline {section}{\numberline {D}Clustering }{19}{appendix.D}}
\newlabel{eq:Jacobian clustering}{{59}{19}{Clustering}{equation.D.59}{}}
\newlabel{eq:exp-subgrad-cluster}{{61}{19}{Clustering}{equation.D.61}{}}
\newlabel{eq:final-cnd-cluster}{{62}{19}{Clustering}{equation.D.62}{}}
\citation{Ilyas2017}
\citation{Goodfellow2014}
\newlabel{sec:verification1}{{E}{20}{Basis Pursuit}{appendix.E}{}}
\@writefile{toc}{\contentsline {section}{\numberline {E}Basis Pursuit }{20}{appendix.E}}
\newlabel{eq:jacob-bp}{{63}{20}{Basis Pursuit}{equation.E.63}{}}
\newlabel{eq:cnd-bp-pre}{{64}{20}{Basis Pursuit}{equation.E.64}{}}
\newlabel{eq:final-bp-cnd}{{66}{20}{Basis Pursuit}{equation.E.66}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {E.1}$\ell _\infty $ Denoising with a Generative Prior}{20}{subsection.E.1}}
\citation{ge2016efficient}
\citation{boumal2016non}
\citation{boumal2016global}
\citation{ge2016efficient}
\citation{manopt}
\newlabel{fig:comparison_fab}{{E.1}{21}{$\ell _\infty $ Denoising with a Generative Prior}{equation.E.67}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Augmented Lagrangian vs Adam for $\ell _\infty $ denoising (left). $\ell _2$ vs $\ell _\infty $ denoising as a defense against adversarial examples.}}{21}{figure.3}}
\@writefile{toc}{\contentsline {subsection}{\numberline {E.2}Generalized Eigenvalue Problem}{21}{subsection.E.2}}
\newlabel{eq:eig}{{68}{21}{Generalized Eigenvalue Problem}{equation.E.68}{}}
\newlabel{eq:eig-sdp}{{69}{21}{Generalized Eigenvalue Problem}{equation.E.69}{}}
\newlabel{eq:eig-equiv}{{70}{21}{Generalized Eigenvalue Problem}{equation.E.70}{}}
\newlabel{eq:jacobian-gen-eval}{{71}{21}{Generalized Eigenvalue Problem}{equation.E.71}{}}
\providecommand\NAT@force@numbers{}\NAT@force@numbers
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces {\it {(Top)}} Objective convergence for calculating the top generalized eigenvalue and eigenvector of $B$ and $C$. {\it {(Bottom)}} Eigenvalue structure of the matrices. For (i), (ii) and (iii), $C$ is positive semidefinite; for (iv), (v) and (vi), $C$ contains negative eigenvalues. {[(i): Generated by taking the symmetric part of an iid Gaussian matrix. (ii): Generated by randomly rotating diag($1^{-p}, 2^{-p}, \cdots , 1000^{-p}$) ($p=1$). (iii): Generated by randomly rotating diag($10^{-p}, 10^{-2p}, \cdots , 10^{-1000p}$) ($p=0.0025$).]} }}{22}{figure.4}}
\newlabel{fig:geig1}{{4}{22}{{\it {(Top)}} Objective convergence for calculating the top generalized eigenvalue and eigenvector of $B$ and $C$. {\it {(Bottom)}} Eigenvalue structure of the matrices. For (i), (ii) and (iii), $C$ is positive semidefinite; for (iv), (v) and (vi), $C$ contains negative eigenvalues. {[(i): Generated by taking the symmetric part of an iid Gaussian matrix. (ii): Generated by randomly rotating diag($1^{-p}, 2^{-p}, \cdots , 1000^{-p}$) ($p=1$). (iii): Generated by randomly rotating diag($10^{-p}, 10^{-2p}, \cdots , 10^{-1000p}$) ($p=0.0025$).]}}{figure.4}{}}