
\documentclass{article}
% Change "article" to "report" to get rid of page number on title page
\usepackage{amsmath,amsfonts,amsthm,amssymb,url}
\usepackage{setspace}
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage{extramarks}
\usepackage{chngpage}
\usepackage{soul}
\usepackage[usenames,dvipsnames]{color}
\usepackage{graphicx,float,wrapfig}
\usepackage{ifthen}
\usepackage{listings}
\usepackage{courier}
\usepackage{mathtools}
\usepackage{gensymb}
\usepackage{subcaption}
% units of measure
\usepackage{siunitx}
% comments
\usepackage{comment}
% pgfplots environment
%package setup for graphs
\usepackage{tikz}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\usetikzlibrary{patterns}
\usepgfplotslibrary{external}
\pgfplotsset{compat=newest}
%pgfplot setup
\makeatletter
\pgfplotsset{
/pgfplots/flexible xticklabels from table/.code n args={3}{%
\pgfplotstableread[#3]{#1}\coordinate@table
\pgfplotstablegetcolumn{#2}\of{\coordinate@table}\to\pgfplots@xticklabels
\let\pgfplots@xticklabel=\pgfplots@user@ticklabel@list@x
},
% layer definition
layers/my layer set/.define layer set={
background,
main,
foreground
}{
% you could state styles here which should be moved to
% corresponding layers, but that is not necessary here.
% That is why we don't state anything here
},
% activate the newly created layer set
set layers=my layer set
}
\makeatother
\IfFileExists{./tikzext.perm}{\tikzexternalize[prefix=tikzext/]}{\tikzexternalize}
% definition of the absolute value
\DeclarePairedDelimiter\abs{\lvert}{\rvert}
\usepackage{scalerel,stackengine}
\stackMath
\newcommand\reallywidehat[1]{%
\savestack{\tmpbox}{\stretchto{%
\scaleto{%
\scalerel*[\widthof{\ensuremath{#1}}]{\kern-.6pt\bigwedge\kern-.6pt}%
{\rule[-\textheight/2]{1ex}{\textheight}}%WIDTH-LIMITED BIG WEDGE
}{\textheight}%
}{0.5ex}}%
\stackon[1pt]{#1}{\tmpbox}%
}
% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
%
% Here put your info (name, due date, title etc).
% the rest should be left unchanged.
%
%
%
% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
% Homework Specific Information
\newcommand{\hmwkTitle}{Report 2}
\newcommand{\hmwkSubTitle}{Linear systems solving and diagonalization methods}
\newcommand{\hmwkDueDate}{April 30, 2020}
\newcommand{\hmwkClass}{Computational Physics III}
\newcommand{\hmwkClassTime}{\today}
%\newcommand{\hmwkClassInstructor}{Prof. Oleg Yazyev}
\newcommand{\hmwkAuthorName}{Raffaele Ancarola}
%
%
% In case you need to adjust margins:
\topmargin=-0.45in %
\evensidemargin=0in %
\oddsidemargin=0in %
\textwidth=6.5in %
\textheight=9.5in %
\headsep=0.25in %
% This is the color used for comments below
\definecolor{MyDarkGreen}{rgb}{0.0,0.4,0.0}
% For faster processing, load Matlab syntax for listings
\lstloadlanguages{Matlab}%
\lstset{language=Matlab, % Use MATLAB
frame=single, % Single frame around code
basicstyle=\small\ttfamily, % Use small true type font
keywordstyle=[1]\color{Blue}\bf, % MATLAB functions bold and blue
keywordstyle=[2]\color{Purple}, % MATLAB function arguments purple
keywordstyle=[3]\color{Blue}\underbar, % User functions underlined and blue
identifierstyle=, % Nothing special about identifiers
% Comments small dark green courier
commentstyle=\usefont{T1}{pcr}{m}{sl}\color{MyDarkGreen}\small,
stringstyle=\color{Purple}, % Strings are purple
showstringspaces=false, % Don't put marks in string spaces
tabsize=3, % 3 spaces per tab
breaklines=true,
linewidth=\textwidth,
%
%%% Put standard MATLAB functions not included in the default
%%% language here
morekeywords={xlim,ylim,var,alpha,factorial,poissrnd,normpdf,normcdf},
%
%%% Put MATLAB function parameters here
morekeywords=[2]{on, off, interp},
%
%%% Put user defined functions here
morekeywords=[3]{FindESS, homework_example},
%
morecomment=[l][\color{Blue}]{...}, % Line continuation (...) like blue comment
numbers=left, % Line numbers on left
firstnumber=1, % Line numbers start with line 1
numberstyle=\tiny\color{Blue}, % Line numbers are blue
stepnumber=1 % Line numbers go in steps of 1
}
% Setup the header and footer
\pagestyle{fancy} %
\lhead{\hmwkAuthorName} %
%\chead{\hmwkClass\ (\hmwkClassInstructor\ \hmwkClassTime): \hmwkTitle} %
\rhead{\hmwkClass\ : \hmwkTitle} %
%\rhead{\firstxmark} %
\lfoot{\lastxmark} %
\cfoot{} %
\rfoot{Page\ \thepage\ of\ \protect\pageref{LastPage}} %
\renewcommand\headrulewidth{0.4pt} %
\renewcommand\footrulewidth{0.4pt} %
% This is used to trace down (pin point) problems
% in latexing a document:
%\tracingall
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Some tools
\newcommand{\enterProblemHeader}[1]{\nobreak\extramarks{#1}{#1 continued on next page\ldots}\nobreak%
\nobreak\extramarks{#1 (continued)}{#1 continued on next page\ldots}\nobreak}%
\newcommand{\exitProblemHeader}[1]{\nobreak\extramarks{#1 (continued)}{#1 continued on next page\ldots}\nobreak%
\nobreak\extramarks{#1}{}\nobreak}%
\newlength{\labelLength}
\newcommand{\labelAnswer}[2]
{\settowidth{\labelLength}{#1}%
\addtolength{\labelLength}{0.25in}%
\changetext{}{-\labelLength}{}{}{}%
\noindent\fbox{\begin{minipage}[c]{\columnwidth}#2\end{minipage}}%
\marginpar{\fbox{#1}}%
% We put the blank space above in order to make sure this
% \marginpar gets correctly placed.
\changetext{}{+\labelLength}{}{}{}}%
\setcounter{secnumdepth}{0}
\newcommand{\homeworkProblemName}{}%
\newcounter{homeworkProblemCounter}%
\newenvironment{homeworkProblem}[1][Problem \arabic{homeworkProblemCounter}]%
{\stepcounter{homeworkProblemCounter}%
\renewcommand{\homeworkProblemName}{#1}%
\section{\homeworkProblemName}%
\enterProblemHeader{\homeworkProblemName}}%
{\exitProblemHeader{\homeworkProblemName}}%
\newcommand{\problemAnswer}[1]
{\noindent\fbox{\begin{minipage}[c]{\columnwidth}#1\end{minipage}}}%
\newcommand{\problemLAnswer}[1]
{\labelAnswer{\homeworkProblemName}{#1}}
\newcommand{\homeworkSectionName}{}%
\newlength{\homeworkSectionLabelLength}%
\newenvironment{homeworkSection}[1]%
{% We put this space here to make sure we're not connected to the above.
% Otherwise the changetext can do funny things to the other margin
\renewcommand{\homeworkSectionName}{#1}%
%\settowidth{\homeworkSectionLabelLength}{\homeworkSectionName}%
\addtolength{\homeworkSectionLabelLength}{0.25in}%
\changetext{}{-\homeworkSectionLabelLength}{}{}{}%
\subsection{\homeworkSectionName}%
\enterProblemHeader{\homeworkProblemName\ [\homeworkSectionName]}}%
{\enterProblemHeader{\homeworkProblemName}%
% We put the blank space above in order to make sure this margin
% change doesn't happen too soon (otherwise \sectionAnswer's can
% get ugly about their \marginpar placement.
\changetext{}{+\homeworkSectionLabelLength}{}{}{}}%
\newcommand{\sectionAnswer}[1]
{% We put this space here to make sure we're disconnected from the previous
% passage
\noindent\fbox{\begin{minipage}[c]{\columnwidth}#1\end{minipage}}%
\enterProblemHeader{\homeworkProblemName}\exitProblemHeader{\homeworkProblemName}%
\marginpar{\fbox{\homeworkSectionName}}%
% We put the blank space above in order to make sure this
% \marginpar gets correctly placed.
}%
\newcommand{\unlimwrap}[3][0.45\linewidth]
{
\begin{minipage}{\linewidth}
\hspace{-0.5cm}
\begin{minipage}{#1}
#2
\end{minipage}
\hspace{1cm}
\begin{minipage}{0.7\linewidth}
#3
\end{minipage}
\end{minipage}
}
%%% I think \captionwidth (commented out below) can go away
%%%
%% Edits the caption width
%\newcommand{\captionwidth}[1]{%
% \dimen0=\columnwidth \advance\dimen0 by-#1\relax
% \divide\dimen0 by2
% \advance\leftskip by\dimen0
% \advance\rightskip by\dimen0
%}
% Includes a figure
% The first parameter is the label, which is also the name of the figure
% with or without the extension (e.g., .eps, .fig, .png, .gif, etc.)
% IF NO EXTENSION IS GIVEN, LaTeX will look for the most appropriate one.
% This means that if a DVI (or PS) is being produced, it will look for
% an eps. If a PDF is being produced, it will look for nearly anything
% else (gif, jpg, png, et cetera). Because of this, when I generate figures
% I typically generate an eps and a png to allow me the most flexibility
% when rendering my document.
% The second parameter is the width of the figure normalized to column width
% (e.g. 0.5 for half a column, 0.75 for 75% of the column)
% The third parameter is the caption.
\newcommand{\scalefig}[3]{
\begin{figure}[ht!]
% Requires \usepackage{graphicx}
\centering
\includegraphics[width=#2\columnwidth]{#1}
%%% I think \captionwidth (see above) can go away as long as
%%% \centering is above
%\captionwidth{#2\columnwidth}%
\caption{#3}
\label{#1}
\end{figure}}
% Includes a MATLAB script.
% The first parameter is the label, which also is the name of the script
% without the .m.
% The second parameter is the optional caption.
%\newcommand{\matlabscript}[2]
% {\begin{itemize}\item[]\lstinputlisting[caption=#2,label=#1]{#1.m}\end{itemize}}
\newcommand{\matlabscript}[2]
{\lstinputlisting[caption=#2,label=#1]{#1.m}}
% pfgplots add graphics
\makeatletter
\newcommand\addplotgraphicsnatural[2][]{%
\begingroup
% set options in this local group (will be lost afterwards):
\pgfqkeys{/pgfplots/plot graphics}{#1}%
% measure the natural size of the graphics:
\setbox0=\hbox{\includegraphics{#2}}%
%
% compute the required unit vector ratio:
\pgfmathparse{\wd0/(\pgfkeysvalueof{/pgfplots/plot graphics/xmax} - \pgfkeysvalueof{/pgfplots/plot graphics/xmin})}%
\let\xunit=\pgfmathresult
\pgfmathparse{\ht0/(\pgfkeysvalueof{/pgfplots/plot graphics/ymax} - \pgfkeysvalueof{/pgfplots/plot graphics/ymin})}%
\let\yunit=\pgfmathresult
%
% configure pgfplots to use it.
% The \xdef expands all macros except those prefixed by '\noexpand'
% and assigns the result to a global macro named '\marshal'.
\xdef\marshal{%
\noexpand\pgfplotsset{unit vector ratio={\xunit\space \yunit}}%
}%
\endgroup
%
% use our macro here:
\marshal
%
\addplot graphics[#1] {#2};
}
\makeatother
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Make title
%\title{\vspace{2in}\textmd{\textbf{\hmwkClass:\ \hmwkTitle\ifthenelse{\equal{\hmwkSubTitle}{}}{}{\\\hmwkSubTitle}}}\\\normalsize\vspace{0.1in}\small{Due\ on\ \hmwkDueDate}\\\vspace{0.1in}\large{\textit{\hmwkClassInstructor\ \hmwkClassTime}}\vspace{3in}}
\title{\vspace{2in}\textmd{\textbf{\hmwkClass:\ \hmwkTitle\ifthenelse{\equal{\hmwkSubTitle}{}}{}{\\\hmwkSubTitle}}}\\\normalsize\vspace{0.1in}\small{Due\ on\ \hmwkDueDate}\\\vspace{0.1in}\large{\textit{ \hmwkClassTime}}\vspace{3in}}
\date{}
\author{\textbf{\hmwkAuthorName}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\begin{spacing}{1.1}
\maketitle
% Uncomment the \tableofcontents and \newpage lines to get a Contents page
% Uncomment the \setcounter line as well if you do NOT want subsections
% listed in Contents
%\setcounter{tocdepth}{1}
\newpage
\tableofcontents
\newpage
\section{Introduction}
Solving linear problems by hand is often non-trivial when the problem
is particularly large or complex; a machine, however, can find such solutions
quickly through automation.
The first part of this report treats the solution of linear systems,
implementing the \textit{LU decomposition} algorithm and giving some worked examples, while the second part
is dedicated to the eigenvalue problem.
These approaches have led to important results in many fields, one of them being quantum mechanics:
instead of simulating a temporal evolution, in the case of a time-independent Hamiltonian it is possible to solve
the system using diagonalisation.
\section{Solving a system of linear equations}
A \textit{linear problem} can be defined as a system whose describing equations
are all linear. Furthermore, a linear system is said to be \textit{determined} if the number of equations $N$
is finite and corresponds to the number of unknowns.
Such a system, defined over a field $\mathbb{K}$, has the advantage that it can be written in matrix form:
\begin{equation} \label{matrix_form}
A \cdot \vec{x} = \vec{b}
\end{equation}
where $A$ is the describing matrix and $\vec{b} \in \mathbb{K}^N$ is the affine component of the system,
i.e. the components that are independent of the unknowns contained in $\vec{x}$.
Because the system is \textit{determined}, the condition that $A$ must satisfy is \textit{invertibility},
i.e. $A \in \mathcal{GL}(N)$, and solving the system means finding $\vec{x} \in \mathbb{K}^N$ such that eq. (\ref{matrix_form}) is satisfied.
There exist various approaches to this end; in this report three of them are analysed:
\textit{Gauss elimination}, \textit{LU decomposition} and \textit{diagonalisation}.
\subsection{Gauss elimination algorithm}
The \textit{Gauss elimination} method is based on the fact that any invertible square matrix can
be decomposed into a finite sequence of elementary operations $\{P_k\}_{1 \le k \le M}$, $M \in \mathbb{N}^*$.
There are basically three kinds of them (a minimal implementation sketch is given after equation (\ref{gauss_redux})):
\begin{itemize}
\item Multiplying a row by a scalar factor $\lambda \in \mathbb{K}$
\item Switching a row with another
\item Adding a multiple of one row to another
\end{itemize}
The purpose of this method is to reduce the involved matrix $A$ to the identity
while applying the same operations to the vector $\vec{b}$, as shown in equation (\ref{gauss_redux}).
\begin{equation} \label{gauss_redux}
A = P_1 \cdot ... \cdot P_M \implies \vec{x} = P_M^{-1} \cdot ... \cdot P_1^{-1} \cdot \vec{b} \:, \quad M \in \mathbb{N}^*
\end{equation}
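As an illustration only, the following is a minimal sketch of the method (without the pivoting discussed later, so it assumes non-vanishing pivots); it is not the attached solver:
\begin{lstlisting}
% Sketch: reduce A to upper triangular form, then back-substitute.
function x = gauss_sketch(A, b)
    N = length(b);
    for k = 1:N-1                        % forward elimination
        for i = k+1:N
            m = A(i,k) / A(k,k);         % assumes A(k,k) ~= 0
            A(i,k:N) = A(i,k:N) - m * A(k,k:N);
            b(i) = b(i) - m * b(k);
        end
    end
    x = zeros(N,1);                      % back substitution
    for i = N:-1:1
        x(i) = (b(i) - A(i,i+1:N) * x(i+1:N)) / A(i,i);
    end
end
\end{lstlisting}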
\subsection{LU decomposition}
The \textit{LU decomposition} is not itself a direct method for solving a linear system, but it
simplifies the resolution by decomposing the matrix $A$ into a lower-triangular matrix $L$ and
an upper-triangular matrix $U$. The simplification comes from the fact that triangular matrices are much
easier to invert.
Once $A$ is decomposed, the process is straightforward:
\begin{align}
A \cdot \vec{x} = L \cdot U \cdot \vec{x} &= \vec{b} \nonumber \\
L \cdot \vec{y} &= \vec{b} \label{L_solve} \\
U \cdot \vec{x} &= \vec{y} \label{U_solve}
\end{align}
Both equations (\ref{L_solve}) and (\ref{U_solve}) can then be solved sequentially using the
\textit{Gauss elimination} method, which for triangular matrices reduces to forward and back substitution.
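As an illustration, a minimal sketch of the two triangular solves (assuming a valid decomposition with non-zero diagonal entries in $L$ and $U$):
\begin{lstlisting}
% Sketch of the solves (L_solve) and (U_solve) by substitution.
function x = lu_solve_sketch(L, U, b)
    N = length(b);
    y = zeros(N,1);
    for i = 1:N                  % forward substitution, L*y = b
        y(i) = (b(i) - L(i,1:i-1) * y(1:i-1)) / L(i,i);
    end
    x = zeros(N,1);
    for i = N:-1:1               % back substitution, U*x = y
        x(i) = (y(i) - U(i,i+1:N) * x(i+1:N)) / U(i,i);
    end
end
\end{lstlisting}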
\subsection{Diagonalization: introduction}
In case $A$ is a symmetric matrix, the spectral theorem \cite{spectral_thm} states that
such a matrix is equivalent (for the definition of equivalence, see \cite{matrix_equiv}) to a diagonal matrix $D$,
where the transition matrix $P$ is unitary ($P^{-1} = \bar{P}^T$); then:
\begin{equation} \label{diag_solve}
A = P \cdot D \cdot \bar{P}^T \implies \vec{x} = P \cdot D^{-1} \cdot \bar{P}^T \cdot \vec{b}
\end{equation}
Diagonalization is generally not used to solve systems of linear equations,
but it is convenient when the problem amounts to finding the eigenbasis and the associated eigenvalues.
%\newpage
\begin{homeworkProblem}
% When problems are long, it may be desirable to put a \newpage or a
% \clearpage before each homeworkProblem environment
\begin{homeworkSection}{(1) LU decomposition implementation}
This algorithm separates the input matrix $A$ into a lower triangular $L$ and an upper triangular $U$, guaranteeing
that $A = L \cdot U$. Nevertheless, not all invertible square matrices are purely LU-decomposable, so the output may turn out ill-formed.
The code (\ref{lu_decomposition}) shows at line 23 that
a division by the diagonal values is performed, which can cause a singularity.
A possible work-around is to apply the partial pivoting technique in order to swap the problematic rows.
Listing (\ref{lu_decomposition}) shows a full implementation with partial pivoting.
\matlabscript{lu_decomposition}{\textit{LU decomposition} implementation with partial pivoting}
\subsubsection{Partial pivoting}
The \textit{LU decomposition} algorithm (listing (\ref{lu_decomposition})) can easily run into
singularities, especially when $A$ presents zeros as diagonal terms.
In order to avoid divergent results, it is better to select a row whose element in the relevant
column is non-zero and swap it with the current one.
More precisely, at the $k$-th step, select the $r$-th row such that $\abs*{A_{rk}} = \max\limits_{k \le i \le N} \abs*{A_{ik}}$,
then swap the rows at positions $k$ and $r$.
If pivoting is applied, the resulting \textit{LU decomposition} is no longer the one defined in the
previous section; a correction to equation (\ref{L_solve}) must be applied:
\begin{equation} \label{pivot_correction}
P \cdot A = L \cdot U \implies L \cdot \vec{y} = P \cdot \vec{b}
\end{equation}
where $P$ is the permutation (hence orthogonal) matrix that accumulates all the row swaps.
The rest of the solving method remains unchanged.
\subsubsection{(1.1) Solving a linear system}
A linear system can be solved by applying the LU decomposition and then a Gauss elimination process, as
shown in equations (\ref{pivot_correction}) and (\ref{U_solve}).
For example, the system in equation (\ref{first_system}) is determined and can be solved using
the \texttt{solve.m} script. Additionally, the \texttt{test\_solve.m} script compares against MATLAB's
\texttt{x = A \symbol{92} b}, verifying that the solution $\vec{x}$ is given correctly by the \texttt{solve.m} script.
\begin{equation} \label{first_system}
\begin{cases}
2 x_1 + x_2 - x_3 + 5 x_4 &= 13 \\
x_1 + 2 x_2 + 3 x_3 - x_4 &= 37 \\
x_1 + x_3 + 6 x_4 &= 30 \\
x_1 + 3 x_2 - x_3 + 5 x_4 &= 19 \\
\end{cases}
\quad \implies A =
\begin{pmatrix}
2 & 1 & -1 & 5 \\
1 & 2 & 3 & -1 \\
1 & 0 & 1 & 6 \\
1 & 3 & -1 & 5 \\
\end{pmatrix}, \quad \vec{b} =
\begin{pmatrix}
13 \\
37 \\
30 \\
19 \\
\end{pmatrix}
\quad \implies \vec{x} = A^{-1} \cdot \vec{b} =
\begin{pmatrix}
2 \\
4 \\
10 \\
3 \\
\end{pmatrix}
\end{equation}
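As a usage sketch (assuming the attached \texttt{solve.m} takes the pair $(A, \vec{b})$), the example above can be checked against the built-in backslash operator:
\begin{lstlisting}
A = [2 1 -1 5; 1 2 3 -1; 1 0 1 6; 1 3 -1 5];
b = [13; 37; 30; 19];
x = solve(A, b);                 % expected: [2; 4; 10; 3]
assert(norm(x - A\b) < 1e-10);   % agreement with the built-in solver
\end{lstlisting}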
\subsubsection{(1.2) Comparing with \texttt{matlab lu} built-in function}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.45\textwidth}
The graph in figure (\ref{lu_compare}) shows a comparison between
the previously implemented method and the built-in MATLAB \texttt{lu} function
for sizes between $1$ and $100$.
The evident result is that they are almost equal: the matrices $U$ and $P$ are
probably determined in exactly the same way,
since their difference is identically zero. However, there is a small difference, of the order
of $10^{-13}$, in the determination of $L$, which seems to depend on the input matrix size and
its specific composition.
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.57\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/lu_size.tex}
}
\captionof{figure}{Comparison of the outputs $L$, $U$ and $P$ between code (\ref{lu_decomposition}) and the MATLAB \texttt{lu} function}
\label{lu_compare}
\end{minipage}
\end{minipage}
\subsubsection{(1.3) Decomposition of a matrix}
The example taken in equation (\ref{lu_example})
is a problematic case where a pure LU decomposition
does not exist. A necessary and sufficient condition for the existence of
a pure LU decomposition is that the matrix be Gauss-reducible
without any row exchange (ref. \cite{exists_LU}); that is why, if such a decomposition
exists, the pivoting matrix $P$ is the identity.
Here, however, the form $P \cdot A = L \cdot U$ is
obtainable using the pivoting described in the previous section.
\begin{equation} \label{lu_example}
A =
\begin{pmatrix}
1 & 2 & 3 \\
2 & 4 & 9 \\
4 & -3 & 1 \\
\end{pmatrix}
\implies
L =
\begin{pmatrix}
1 & 0 & 0 \\
0.5 & 1 & 0 \\
0.25 & 0.5 & 1 \\
\end{pmatrix}, \quad
U =
\begin{pmatrix}
4 & -3 & 1 \\
0 & 5.5 & 8.5 \\
0 & 0 & -1.5 \\
\end{pmatrix} \quad
P =
\begin{pmatrix}
0 & 0 & 1 \\
0 & 1 & 0 \\
1 & 0 & 0 \\
\end{pmatrix} \quad
\end{equation}
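As a quick check, the built-in function reproduces exactly the factors of equation (\ref{lu_example}):
\begin{lstlisting}
A = [1 2 3; 2 4 9; 4 -3 1];
[L, U, P] = lu(A);   % returns P*A = L*U with the matrices above
\end{lstlisting}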
\end{homeworkSection}
\begin{homeworkSection}{(2) Puzzle board game}
The puzzle game problem is described using the following formalization:
\begin{equation} \label{puzzle_form}
b_k = x_k + x_{k-N} + x_{k+N} + x_{k-1} + x_{k+1}, \quad
A = T \otimes 1 + 1 \otimes T - 1 \otimes 1
\end{equation}
where $x_k$ is the number of times that cell $k$ is pressed and $b_k$ the resulting value of cell $k$ on the board.
Using the \texttt{kron} MATLAB function it is possible to construct the matrix $A$ easily,
as shown in the code listing (\ref{puzzleA}).
\matlabscript{puzzleA}{Construction of the matrix describing the puzzle game problem}
The index $k$ describes a remapping of the $N \times N$ grid into an $N^2$ vector,
precisely $K_{ij} = (i-1) \cdot N + j$, $1 \le i,j \le N$.
Once the $A$ matrix is composed and the $\vec{b}$ vector given, the solution is
simply obtained by \texttt{x = A \symbol{92} b}.
The code is shown in the attached script \texttt{puzzle.m}.
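A minimal sketch of this remap-and-solve step (variable names such as \texttt{B1} are assumed to match the attached scripts):
\begin{lstlisting}
N = 10;
A = puzzleA(N);            % matrix from listing (puzzleA)
b = reshape(B1', [], 1);   % row-wise remap, k = (i-1)*N + j
x = A \ b;
X1 = reshape(x, N, N)';    % map the solution back to the grid
\end{lstlisting}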
\subsubsection{(2.1) Results of \texttt{test\_puzzle.m} script}
The script \texttt{test\_puzzle.m} takes the initial grids $B_1$ and $B_2$ of equation (\ref{puzzle_init}) and
solves both, remapping each into a column vector and solving with the matrix given by the function in listing (\ref{puzzleA}).
The second solution $X_2$ is not acceptable, because its coefficients are not natural numbers.
By the uniqueness of the solution of a determined system of linear equations,
the conclusion is that the initial grid $B_2$ does not admit a solution.
\begin{equation} \label{puzzle_init}
B_1 =
\begin{pmatrix}
6 & 8 & 9 & 4 & 6 & 2 & 3 & 4 & 4 & 5 \\
6 & 10 & 6 & 11 & 6 & 5 & 4 & 4 & 8 & 5 \\
4 & 6 & 10 & 6 & 10 & 9 & 5 & 4 & 4 & 7 \\
6 & 6 & 2 & 7 & 9 & 7 & 7 & 2 & 4 & 6 \\
7 & 10 & 9 & 6 & 8 & 9 & 4 & 2 & 3 & 5 \\
9 & 11 & 6 & 9 & 7 & 6 & 5 & 6 & 2 & 4 \\
8 & 8 & 10 & 6 & 6 & 5 & 6 & 8 & 5 & 3 \\
7 & 10 & 7 & 6 & 4 & 4 & 4 & 8 & 5 & 5 \\
6 & 9 & 7 & 6 & 4 & 8 & 5 & 8 & 5 & 8 \\
4 & 5 & 7 & 4 & 7 & 8 & 7 & 3 & 6 & 6
\end{pmatrix}, \quad \quad
B_2 =
\begin{pmatrix}
7 & 1 & 3 & 6 & 11 & 3 & 1 & 7 & 1 & 9 \\
4 & 8 & 12 & 8 & 6 & 5 & 12 & 10 & 1 & 1 \\
1 & 7 & 6 & 7 & 13 & 6 & 8 & 3 & 4 & 0 \\
3 & 1 & 3 & 2 & 5 & 5 & 11 & 9 & 8 & 0 \\
7 & 7 & 7 & 4 & 14 & 7 & 10 & 11 & 7 & 8 \\
12 & 9 & 10 & 3 & 7 & 3 & 15 & 15 & 7 & 8 \\
6 & 12 & 7 & 7 & 11 & 8 & 8 & 6 & 2 & 8 \\
7 & 2 & 4 & 6 & 4 & 1 & 5 & 0 & 6 & 5 \\
2 & 4 & 3 & 4 & 4 & 4 & 4 & 5 & 4 & 6 \\
3 & 2 & 7 & 4 & 7 & 4 & 6 & 0 & 0 & 7
\end{pmatrix}
\end{equation}
\begin{align} \label{puzzle_results}
X_1 &=
\begin{pmatrix}
4 & 2 & 1 & 2 & 1 & 1 & 0 & 1 & 2 & -0 \\
0 & 1 & 4 & 0 & 2 & 0 & 1 & 1 & 1 & 3 \\
1 & 3 & 0 & 3 & 3 & 1 & 2 & 0 & 1 & 1 \\
0 & 1 & 0 & 0 & 1 & 3 & 1 & 0 & 1 & 2 \\
4 & 2 & 1 & 3 & 2 & 1 & 1 & 0 & 0 & 2 \\
1 & 2 & 3 & 0 & 1 & 2 & 1 & 1 & 0 & 1 \\
2 & 3 & 0 & 2 & 2 & 1 & 0 & 4 & 0 & 1 \\
2 & 1 & 2 & 2 & 0 & 0 & 0 & 3 & 0 & 1 \\
2 & 2 & 2 & 0 & 0 & 3 & 1 & 1 & 1 & 3 \\
0 & 2 & 1 & 2 & 1 & 4 & 0 & 2 & 0 & 3
\end{pmatrix}, \\
X_2 &=
\begin{pmatrix}
18.1304 & -33.7826 & 23.0870 & 12.5217 & -23.7826 & 17.3913 & -1.1304 & -13.3913 & 32.0435 & -28.8261 \\
22.6522 & -6.4348 & 1.1739 & -5.8261 & 4.8696 & 10.5217 & -1.8696 & -10.5217 & 11.1739 & 5.7826 \\
-30.3478 & 24.3913 & 0.0000 & -4.7391 & 20.2174 & -25.9130 & 15.0000 & 24.6087 & -37.4783 & 12.8696 \\
-15.6957 & 19.3913 & -14.8261 & -2.6522 & 18.5652 & -13.8261 & -3.8261 & 11.3913 & -7.1739 & 18.8261 \\
29.6522 & -12.2609 & 1.0870 & 5.6522 & -17.3043 & 30.0000 & 2.2609 & -16.0000 & 22.4348 & -24.5217 \\
5.3043 & -30.8696 & 27.3478 & 17.2174 & -22.9130 & 5.8696 & -2.4348 & -9.0870 & 32.2609 & -8.7391 \\
7.9130 & 19.4783 & -4.7826 & -24.3043 & 24.1304 & -7.5217 & 18.3913 & 10.2609 & -29.8696 & 9.0000 \\
-26.6957 & 20.2609 & -10.7391 & -5.2609 & 41.6087 & -32.8696 & -10.6957 & 16.3043 & -19.6522 & 37.6087 \\
5.5217 & -0.3043 & 4.5217 & 4.6957 & -23.6087 & 10.4783 & 13.8696 & 3.7826 & 1.6087 & -21.9565 \\
23.4783 & -26.0000 & 4.8261 & 23.6522 & -29.1739 & 36.1304 & -13.4348 & -30.5652 & 40.2174 & -11.2609
\end{pmatrix}
\end{align}
\end{homeworkSection}
\begin{homeworkSection}{(3) Helicopter power formula: dimensional analysis}
The helicopter problem is a dimensional-analysis problem: knowing that
the involved quantities are $P$, $g$, $L$, $\rho_h$ and $\rho_a$,
their relation can only depend on their units of measure.
Thus, if the second helicopter has $1/3$ of the length of the first one,
then, taking the formula in the document \cite{exo67}, its power is given by $P_2 = 3^{-\beta} P_1$.
\subsubsection{(3.1) Approaching the problem}
The formula cited above can be expressed in logarithmic form:
\begin{equation} \label{elic_log}
\ln(P) = \alpha \cdot \ln(g) + \beta \cdot \ln(L)
+ \gamma \cdot \ln(\rho_h) + \delta \cdot \ln(\rho_a)
\end{equation}
Assigning to each quantity its corresponding SI unit of measure \cite{SI_units},
namely $[P] =$ \si{kg.m^2/s^3}, $[g] =$ \si{m/s^2}, $[L] =$ \si{m}, $[\rho] =$ \si{kg/m^3},
the logarithms of \si{m}, \si{s} and \si{kg} can be treated as a vector basis.
At this point equation (\ref{elic_log}) can be rewritten as:
\begin{align} \label{elic_unit}
\ln(\si{kg}) \cdot (1 - \gamma - \delta) +&
\ln(\si{m}) \cdot (2 - \alpha - \beta + 3 \gamma + 3 \delta) +
\ln(\si{s}) \cdot (-3 + 2 \alpha) = 0 \\
\implies &
\begin{cases}
\gamma + \delta =& 1 \\
\alpha + \beta - 3 \gamma - 3 \delta =& 2 \\
2 \alpha =& 3
\end{cases}
\end{align}
This system is underdetermined, thus it cannot be solved computationally using the
\texttt{solve.m} script, because its matrix representation is not a square matrix.
\subsubsection{(3.2) + (3.3), Adding a constraint}
In the case where $\alpha = \gamma$, the system found in the previous point reduces to
a determined system of linear equations, which has a square matrix form $A$.
\begin{equation} \label{elic_solveable}
\begin{cases}
\alpha + \delta &= 1 \\
2\alpha - \beta + 3 \delta &= -2 \\
2 \alpha &= 3
\end{cases} \quad \implies
A =
\begin{pmatrix}
1 & 0 & 1 \\
2 & -1 & 3 \\
2 & 0 & 0
\end{pmatrix},
\quad
\vec{b} =
\begin{pmatrix}
1 \\
-2 \\
3
\end{pmatrix}
\end{equation}
Now the system is solvable and the solution is straightforward:
\begin{align} \label{elic_solution}
\alpha = \gamma = \frac{3}{2}, & & \beta = \frac{7}{2}, & & \delta = -\frac{1}{2}
\end{align}
So, the output power of the second helicopter is $P_2 = 3^{-\frac{7}{2}} \cdot P_1 \approx 0.021 P_1$.
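As a check, the reduced system can be fed to the attached solver (a usage sketch, with the unknowns ordered as $(\alpha, \beta, \delta)$):
\begin{lstlisting}
A = [1 0 1; 2 -1 3; 2 0 0];
b = [1; -2; 3];
x = solve(A, b);   % expected: [3/2; 7/2; -1/2]
\end{lstlisting}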
\end{homeworkSection}
\end{homeworkProblem}
\section{The eigenvalue problem and diagonalization}
Let $\hat{A}$ be an operator defined over a Hilbert space $\mathcal{H}$.
Solving an eigenvalue problem means finding all vectors (or functions) $x \in \mathcal{H}$
such that there exists a real (or complex) value $\lambda$ satisfying the following condition:
\begin{equation} \label{eig_def}
\hat{A} \cdot x = \lambda \cdot x, \lambda \in \mathbb{K}
\end{equation}
In this report, the interest is in computationally solving the eigenvalue problem for finite-rank
operators, which can be expressed as square matrices.
So, let $N$ be the rank of a square matrix $A$ and $\vec{v} \in \mathbb{K}^N$; then equation (\ref{eig_def}) is equivalent to:
\begin{equation} \label{eig_vec}
A \cdot \vec{v} = \lambda \cdot \vec{v}, \lambda \in \mathbb{K}
\end{equation}
\subsection{Power method} \label{power_section}
The power method is based on the iterative application of a specific operator $T$.
The principle is that every iteration step tends to reduce the distance between the previously evaluated eigenvalue $\lambda_{k-1}$ and the current one $\lambda_k$.
Given the unit vector $\vec{v_k} \in \mathbb{K}^N$, $\|\vec{v_k}\| = 1$, at iteration step $k$, the corresponding eigenvalue estimate, relative to a square matrix $A$, is given by the Hermitian scalar product (see \cite{inner_product} for the notation):
\begin{equation} \label{herm_eig}
\lambda_k = \langle \vec{v_k}, A \vec{v_k} \rangle, \quad \lambda_k \in \mathbb{K}
\end{equation}
The operation $T$ mentioned above varies depending on the specific method, of which there are three:
\begin{itemize}
\item \textit{Power} method: $T = A$.
\item \textit{Inverse power} method: $T = (A - 1 \cdot \tau)^{-1}$, $\tau \in \mathbb{K}$ is a fixed eigenvalue target.
\item \textit{Rayleigh quotient} method: $T = (A - 1 \cdot \lambda_{k-1})^{-1}$, $\lambda_{k-1} \in \mathbb{K}$ is the old evaluated eigenvalue, as defined in equation (\ref{herm_eig}).
\end{itemize}
Notice that for the \textit{Rayleigh quotient} method the applied operator adapts during each
iteration, guaranteeing a faster convergence.
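To fix ideas, here is a minimal sketch of the plain power iteration ($T = A$); the attached \texttt{eig\_power.m} is the implementation actually used in this report:
\begin{lstlisting}
% Sketch of the power method: apply T = A repeatedly and track
% the eigenvalue estimate given by the Hermitian scalar product.
function [lambda, v] = power_sketch(A, tol, maxit)
    v = rand(size(A,1), 1);
    v = v / norm(v);
    lambda = v' * A * v;
    for k = 1:maxit
        v = A * v;
        v = v / norm(v);         % keep the iterate normalized
        lambda_old = lambda;
        lambda = v' * A * v;
        if abs(lambda - lambda_old) < tol
            return
        end
    end
end
\end{lstlisting}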
\subsection{Jacobi method}
Let $A$ be a real symmetric matrix; by the spectral theorem \cite{spectral_thm} such a matrix is diagonalizable in the form $A = PDP^T$,
where $P$ is an orthogonal matrix and $D$ a real diagonal matrix.
The underlying idea is that any orthogonal matrix $P$ can be decomposed into a series of \textit{axis aligned} rotation matrices $\{J^{(pq)}(\theta)\}$, $p,q \in \mathbb{N}^*$, $p < q$.
An \textit{axis aligned} rotation matrix $J^{(pq)}$ is defined as:
\begin{equation} \label{aarm} % axis aligned rotation matrix
\begin{cases}
J^{(pq)}_{pp} = J^{(pq)}_{qq} &= \cos(\theta) \\
J^{(pq)}_{qp} = -J^{(pq)}_{pq} &= \sin(\theta) \\
\forall i,j \neq p,q: & J^{(pq)}_{ij} = \delta_{ij}
\end{cases}
\end{equation}
A property of such a matrix is that its determinant is $1$ for any value of $\theta$ and $J^{-1} = J^T$.
The \textit{Jacobi} method essentially applies a series of axis aligned rotations to the matrix $A$ until the diagonal form is reached,
adapting the value of $\theta$ at each step.
So, let $M$ be the total number of steps necessary to reach the diagonalization; then, for $0 \le k \le M$ and $A_0 = A$,
relation (\ref{jacobi_step}) shows by induction that each step is realized by an axis aligned rotation of the matrix $A_k$,
and at the end it is also possible to obtain the transition matrix $P = P_M$.
\begin{align} \label{jacobi_step}
D = J_{M-1}^T A_{M-1} J_{M-1} = P^T A P & & \implies & & A_{k+1} = J_k^T A_k J_k, & & P_{k+1} = P_k J_k
\end{align}
\begin{homeworkProblem}
% When problems are long, it may be desirable to put a \newpage or a
% \clearpage before each homeworkProblem environment
\begin{homeworkSection}{(1) Power iteration methods implementation}
As mentioned in section (\ref{power_section}), all the methods involve
a matrix application, which means that the temporal complexity is at least $N^2$,
where $N$ is the size of the involved matrix.
Additionally, the number of iterations is not simple to estimate
and in the worst case is potentially infinite.
The same can be said for the
tolerance that the minimized value $\abs*{\lambda_k - \lambda_{k-1}}$ should reach:
in fact, the MATLAB constant \texttt{eps} is often too small a tolerance, and for larger
values of $N$ the loop does not exit.
% TODO, find a way to test the performance
The implementation codes can be found in the scripts \texttt{eig\_power.m},
\texttt{eig\_ipower.m} and \texttt{eig\_rq.m}.
\end{homeworkSection}
\begin{homeworkSection}{(2) Eigenmodes of a vibrating string}
\subsubsection{(2.1) Formalization of the problem}
The description of a vibrating string follows the wave equation (\ref{wave_eq}).
Applying a separation of variables, the problem turns out to
depend on an additional variable $\lambda \in \mathbb{R}$.
\begin{align}
\frac{\partial^2 u}{\partial x^2} &= \frac{\kappa}{\rho} \frac{\partial^2 u}{\partial t^2} &
u(0,t) &= 0, & u(L,t) &= 0 &
u(x,t) &= \omega(x) \cdot v(t) &\implies
\begin{cases}
\omega''(x) + \lambda \omega(x) &= 0 \\
v''(t) + \frac{\lambda\rho}{\kappa} v(t) &= 0
\end{cases} \label{wave_eq}
\end{align}
The sign of $\lambda$ will now determine the form of the solution.
Analysing first the case $\lambda = 0$, the solution would be
a linear function $\omega(x) = C x + B$. Unfortunately, applying the
boundary conditions $\omega(0) = B = 0$ and $\omega(L) = C L + B = 0$
implies $B = 0$ and $C = 0$, thus $\omega(x) = 0$.
The same argument applies to the case $\lambda < 0$:
the solution would be $\omega(x) = B \exp(\gamma x) + C \exp(-\gamma x)$, $\gamma = \sqrt{-\lambda}$,
but $\omega(0) = B + C = 0$ and $\omega(L) = B \exp(\gamma L) + C \exp(-\gamma L) = 0$.
These conditions imply $C = -B$ and $2 B \sinh(\gamma L) = 0$, which means that $\gamma = 0$ or $B = 0$;
in both cases the solution turns out to be trivial.
There remains the case $\lambda > 0$, where the solution takes a sinusoidal form:
\begin{equation}
\begin{cases}
\omega(x) &= B_x \sin(\gamma x) + C_x \cos(\gamma x), \quad \gamma = \sqrt{\lambda} \\
v(t) &= B_t \sin(2\pi\nu t) + C_t \cos(2\pi\nu t), \quad \nu = \frac{\sqrt{\frac{\lambda \rho}{\kappa}}}{2\pi}
\end{cases} \label{wave_solution}
\end{equation}
This solution is not identically zero: $\omega(0) = C_x = 0$, and by
the second boundary condition $\omega(L) = B_x \sin(\gamma L) = 0$.
Imposing $B_x \neq 0$, the resulting condition is $\sin(\gamma L) = 0 \iff \gamma_n L = \pi n$, $n \in \mathbb{N}^*$.
In other words, for each natural number $n$ there exists a $\lambda_n$ given by:
\begin{equation} \label{lambda_n}
\lambda_n = \left( \frac{\pi n}{L} \right)^2
\end{equation}
Thus, the condition to obtain a non-trivial solution is given by equation (\ref{lambda_n}).
Notice, from its expression, that dimensionally $\lambda$ is in \si{m^{-2}} in the SI unit system \cite{SI_units}.
\subsubsection{(2.2) Implementation}
Using a finite-difference discretization, namely given a step $\Delta x$ and setting $x_i = (i-1) \cdot \Delta x$, the problem
is expressed as follows:
\begin{align} \label{secnd_deriv}
\frac{- \omega_{i-1} + 2 \omega_i - \omega_{i+1}}{\Delta x^2} = \lambda \omega_i & & \iff & &
A \vec{\omega} = \lambda \vec{\omega}, & &\vec{\omega} = (\omega_1, ..., \omega_N)
\end{align}
$A$ in equation (\ref{secnd_deriv}) is the tridiagonal matrix containing $-1$ on the lower and upper diagonals and $2$ on the main diagonal,
with all terms divided by the squared step $\Delta x^2$. This matrix is symmetric, so by the spectral theorem \cite{spectral_thm} its eigenvalues
are real and the eigenspaces associated with distinct eigenvalues are orthogonal to each other. Furthermore, the matrix is positive definite,
thus all the eigenvalues are strictly positive.
Notice that this setup automatically meets the boundary conditions, because the first and last rows of $A$ already discard the boundary terms (setting them implicitly to zero).
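A minimal sketch of this matrix (assuming $L = 1$ and $N$ interior points, so that $\Delta x = 1/(N+1)$):
\begin{lstlisting}
N = 50; dx = 1 / (N + 1);
e = ones(N, 1);
A = (2*diag(e) - diag(e(1:N-1), 1) - diag(e(1:N-1), -1)) / dx^2;
min(eig(A))   % should approach lambda_1 = pi^2 ~ 9.87
\end{lstlisting}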
% TODO, units of measure!!!!
\subsubsection{(2.3) + (2.4), Find the first eigenvalues}
The first thing to notice is that the matrix $A$ is not degenerate (by its positive definiteness), so
each eigenspace has dimension $1$. This clearly means that the first eigenvalues are all different from each other.
Theoretically, by the expression in equation (\ref{lambda_n}), the eigenvalues for $L = 1$ are
$\lambda_1 = \pi^2 \approx 9.8696$, $\lambda_2 = 4 \pi^2 \approx 39.4784$,
$\lambda_3 = 9 \pi^2 \approx 88.8264$, $\lambda_4 = 16 \pi^2 \approx 157.9137$ and $\lambda_5 = 25 \pi^2 \approx 264.7401$.
In order to find the first one, it is enough to use the \textit{inverse power} method with the target set to $0$.
\par
The other four can be found using an \textit{interval bisection} strategy:
let $l_k \in \mathbb{R}_+$, $k \in \mathbb{N}$, $l_0 = \lambda_1$, let $l_{k+1}$ be the
eigenvalue closest to $2 l_k$, and consider the interval $I_k = [l_k, l_{k+1}]$.
The strategy consists in finding all eigenvalues contained in the interval $I_k$ using a bisection algorithm
and iterating with a new interval until the expected number of eigenvalues is reached.
It may happen that $l_{k+1} = l_k$; in that case the search target is doubled until the evaluation of $l_{k+1}$ mentioned above
converges to a value strictly bigger than $l_k$.
Notice that $l_k$ is always an eigenvalue; this characteristic is fundamental to speed up the eigenvalue search
inside the interval $I_k$.
Consider now a real interval $U$ whose extrema are two eigenvalues, $a_1$ and a not
necessarily consecutive $a_n$:
the idea is to divide the interval into two sub-intervals $U_1 = [a_1, a_i]$ and $U_2 = [a_i, a_n]$,
where $a_i$ is the eigenvalue closest to $\frac{a_1 + a_n}{2}$, evaluated with the \textit{inverse power} method.
If $a_i = a_1$ or $a_i = a_n$, then all the eigenvalues inside the interval $U$ have been found;
otherwise the algorithm recurses inside $U_1$ and $U_2$ until all eigenvalues are found.
\noindent
\begin{figure}[h!]
\hspace{-0.5cm}
\begin{subfigure}{0.35\textwidth}
\includegraphics[width=\textwidth,keepaspectratio]{strategy.pdf}
\caption{Scheme of the \textit{interval bisection} strategy}
\label{string_strategy}
\end{subfigure}
\hspace{0.5cm}
\begin{subfigure}{0.65\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/eigs.tex}
}
\caption{Eigenvectors corresponding to the first five eigenvalues}
\label{eigs_string}
\end{subfigure}
\end{figure}
Now, taking $U = I_k$ at each iteration step $k$, the eigenvalues are guaranteed to be found in exponentially growing
intervals. This approach increases the computational efficiency in most cases, but it can perform a lot
of useless operations if the last requested eigenvalues are situated in an extremely large interval,
inducing an average-case time complexity of $\mathcal{O}(2^{\log(n)}) = \mathcal{O}(n)$, where $n$ is the number
of requested eigenvalues.
Figure (\ref{string_strategy}) graphically shows how the strategy is applied, and the code listing (\ref{eig_first}) contains a MATLAB
implementation.
The graph in figure (\ref{eigs_string}) shows the resulting eigenvectors with their
corresponding eigenvalues. Notice that they do not differ appreciably from the expected
eigenvalues, which demonstrates that the \textit{inverse power} method is sufficiently reliable.
\end{homeworkSection}
\begin{homeworkSection}{(3) Jacobi method implementation}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.45\textwidth}
As presented in the previous section on the Jacobi method, this algorithm diagonalises
a symmetric matrix $A$ by applying a series of rotations.
The most important step of the algorithm is the determination of the rotation angle cosine and sine,
$\cos(\theta)$ and $\sin(\theta)$ respectively, starting from given $p$ and $q$.
The better those values are determined, the faster $A_k$ converges
to a diagonal matrix.
The code in listing (\ref{jacobi_from_pq}) shows exactly how those
values are determined, following the idea that each rotation should
reduce the target $A_{pq}$ to zero.
Furthermore, the algorithm exits when all the off-diagonal terms are reduced to zero;
in practice this is done by evaluating the squared Euclidean norm of the off-diagonal values.
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.57\textwidth}
\matlabscript{jacobi_from_pq}{Determination of $\cos(\theta)$ and $\sin(\theta)$}
\end{minipage}
\end{minipage}
\subsubsection{(3.1) + (3.2) Comparing classic and cyclic jacobi methods}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.45\textwidth}
There are two possible approaches to determine the $p$ and $q$ coefficients:
the \textit{classic Jacobi} method shown in listing (\ref{jacobi_classic}) takes $p$ and $q$ as the indices corresponding to the maximum
off-diagonal term, while the \textit{cyclic Jacobi} method loops directly over all the indices of the
lower triangular side of $A$, as shown in script (\ref{jacobi_cyclic}).
Although both algorithms share the same time complexity $\mathcal{O}(N^3)$, where $N$ is the size of $A$,
the \textit{cyclic Jacobi} method performs a constant number of \texttt{while} iterations for any value of $N$.
The graphs in figures (\ref{jacobi_time}) and (\ref{jacobi_count}) show exactly this result, demonstrating
that the \textit{cyclic} approach is better than the \textit{classic} one.
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.6\textwidth}
\matlabscript{jacobi_classic}{Implementation of the classic Jacobi method}
\end{minipage}
\end{minipage}
\matlabscript{jacobi_cyclic}{Implementation of the cyclic Jacobi method}
\noindent
\begin{figure}[h!]
\hspace{-0.5cm}
\begin{subfigure}{0.5\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/jacobi_time.tex}
}
\caption{Elapsed time of both algorithms as a function of the matrix size $N$}
\label{jacobi_time}
\end{subfigure}
\hspace{0.5cm}
\begin{subfigure}{0.5\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/jacobi_count.tex}
}
\caption{Iteration count of both algorithms as a function of the matrix size}
\label{jacobi_count}
\end{subfigure}
\end{figure}
\subsubsection{(3.3) Applying the axis aligned rotation matrix $J$}
The \texttt{transition} function is supposed to perform the matrix multiplications
$A_{k+1} = J^T A_k J$ and $P_{k+1} = P_k J$. However, the standard matrix multiplication
is unnecessarily expensive here: given that $J$ differs from the identity in just a
few symmetric terms, the computation time can be dramatically reduced.
\par
Let $B = J^T A$ and noting $a_{ij}$ and $b_{ij}$ the $i,j$-th components of $A$ and $B$ respectively, then:
\begin{equation} \label{aarm_opt_col}
\forall j: \quad b_{ij} =
\begin{cases}
i = p:& \cos(\theta) \; a_{pj} + \sin(\theta) \; a_{qj} \\
i = q:& - \sin(\theta) \; a_{pj} + \cos(\theta) \; a_{qj} \\
\text{otherwise}:& a_{ij}
\end{cases}
\end{equation}
The same reasoning applies to the multiplication $B J$, noticing that
$B J = (J^T B^T)^T$; writing
$a'_{ij} = (B J)_{ij}$, the entire transformation becomes:
\begin{equation} \label{aarm_opt_row}
\forall i: \quad a'_{ij} =
\begin{cases}
j = p:& \cos(\theta) \; b_{ip} + \sin(\theta) \; b_{iq} \\
j = q:& - \sin(\theta) \; b_{ip} + \cos(\theta) \; b_{iq} \\
\text{otherwise}:& b_{ij}
\end{cases}
\end{equation}
The same update can be applied at the same time to the evolution of the matrix $P$ (see equation (\ref{jacobi_step})).
The code listing (\ref{jacobi_transition}) contains a MATLAB implementation of the transformation of $A$ and $P$;
notice that this approach reduces the time complexity of each rotation to $\mathcal{O}(N)$, avoiding the $\mathcal{O}(N^3)$
standard matrix product.
\matlabscript{jacobi_transition}{Transition optimized code}
\end{homeworkSection}
\begin{homeworkSection}{(4) Landau levels in a square-lattice model}
\subsubsection{(4.1) Formalization of the problem}
The time-independent Schrödinger equation under the influence of a magnetic
field described by the vector potential $\vec{A}(x,y)$ is written as follows \cite{schroedinger}:
\begin{equation} \label{schroedinger_eq}
\mathcal{\hat{H}} \lvert \psi\rangle = \frac{(i \hbar \vec{\nabla} - \frac{q\vec{A}(x,y)}{c})^2}{2m} \lvert \psi\rangle = E \lvert \psi\rangle
\end{equation}
where $\hbar$ is the Planck constant \cite{plank}, $m$ the particle mass,
$q$ the particle charge and $c$ the speed of light in vacuum \cite{lightspeed}.
For simplicity, all the constants will be neglected, i.e. $\hbar = 1$, $m = \frac{1}{2}$, $q = c$.
Applying then the \textit{tight binding approximation} as shown in the attached document \cite{exo89},
equation (\ref{schroedinger_eq}) reduces to (\ref{landau_eq}) in the two-dimensional case.
\begin{equation} \label{landau_eq}
\mathcal{\hat{H}} \lvert \psi\rangle = -\left(\frac{\partial_{\gamma}^2}{\partial x^2} + \frac{\partial_{\gamma}^2}{\partial y^2}\right) \lvert \psi\rangle = E \lvert \psi\rangle
\end{equation}
The differential operators $\frac{\partial_{\gamma}^2}{\partial x^2}$ and $\frac{\partial_{\gamma}^2}{\partial y^2}$
are equivalent to the partial second derivatives except for a small correction in the off-diagonal values.
In order to better express those operators, let the space be discretized as $x_j = (j-1) \cdot a$ and $y_i = (i-1) \cdot a$
in a two-dimensional square grid, where $1 \le i,j \le N$. Furthermore, let $k_{ij} = (i-1) \cdot N + j$ be the one-dimensional remapping
of the previously mentioned grid; notice that $k_{11} = 1$ and $k_{NN} = N^2$ are the lower and upper bounds respectively.
Using the $ij$-coordinate grid it is possible to express the Hamiltonian in finite differences, as done in the next equation:
\begin{equation} \label{landau_fd}
\mathcal{\hat{H}} \psi_{ij} = \frac{\gamma_D^{(ij)} \psi_{i-1,j} + \gamma_L^{(ij)} \psi_{i,j-1} + 4 \psi_{ij} + \gamma_R^{(ij)} \psi_{i,j+1} + \gamma_U^{(ij)} \psi_{i+1,j}}{a^2} = E \psi_{ij}
\end{equation}
The terms $\gamma_D^{(ij)}$, $\gamma_L^{(ij)}$, $\gamma_R^{(ij)}$, $\gamma_U^{(ij)}$ in the previous equation correspond respectively to
the down, left, right and up directional hopping integrals. For a constant $z$-directed magnetic field $\vec{B}$,
the corresponding vector potential is $\vec{A} = (-\frac{By}{2}, \frac{Bx}{2})$, which leads to the expression of the
hopping integrals:
\begin{align}
\gamma_{\alpha} = - \exp\left(i\int_{\alpha}\vec{A}(x,y) \cdot d\vec{r}\right), & & \alpha = D, \; L, \; R, \; U \label{hop_int} \\
\gamma_D^{(ij)} = - \exp\left(-i \frac{B x_j a}{2}\right), & & \gamma_U^{(ij)} = - \exp\left(i \frac{B x_j a}{2}\right) \label{hop_int_DU} \\
\gamma_L^{(ij)} = - \exp\left(i \frac{B y_i a}{2}\right), & & \gamma_R^{(ij)} = - \exp\left(-i \frac{B y_i a}{2}\right) \label{hop_int_LR}
\end{align}
Equations (\ref{hop_int_DU}) and (\ref{hop_int_LR}) underline the self-adjoint nature of the Hamiltonian, because
$\gamma_D^{(ij)} = \bar{\gamma}_U^{(ij)}$ and $\gamma_L^{(ij)} = \bar{\gamma}_R^{(ij)}$; this means that the applied approximation
of the momentum term in equation (\ref{schroedinger_eq}) is consistent.
Thus, by the spectral theorem \cite{spectral_thm}, all the eigenvalues are real; furthermore,
these values are also expected to be positive because $\abs*{\gamma_{\alpha}} \le 1$, so $\mathcal{\hat{H}}$ is
bounded below by the negative Laplace operator $-\vec{\nabla}^2$, which is positive definite.
In fact, the negative Laplace operator corresponds to the limit $B = 0$.
At this point, equation (\ref{landau_fd}) can be re-expressed in the $k$-coordinates in order to form the Hamiltonian matrix:
\begin{equation} \label{hamiltonian}
\forall \; 1 \le i,j \le N, \forall \; 1 \le k \le N^2: \quad
a^2 \cdot \mathcal{\hat{H}}_{k_{ij},l} =
\begin{cases}
l = k_{i-1,j}\text{ and }i > 1:& \gamma_D^{(ij)} \\
l = k_{i,j-1}\text{ and }j > 1:& \gamma_L^{(ij)} \\
l = k_{ij}:& 4 \\
l = k_{i,j+1}\text{ and }j < N:& \gamma_R^{(ij)} \\
l = k_{i+1,j}\text{ and }i < N:& \gamma_U^{(ij)} \\
\text{otherwise}:& 0
\end{cases}
\end{equation}
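The following is a hedged sketch of how equation (\ref{hamiltonian}) can be assembled (the function name and the dense storage are illustrative choices, not the attached implementation):
\begin{lstlisting}
function H = hamiltonian_sketch(N, B, a)
    H = zeros(N^2);            % dense for clarity; sparse scales better
    k = @(i,j) (i-1)*N + j;    % grid-to-vector remapping
    for i = 1:N
        for j = 1:N
            x = (j-1)*a; y = (i-1)*a;
            H(k(i,j), k(i,j)) = 4;
            if i > 1, H(k(i,j), k(i-1,j)) = -exp(-1i*B*x*a/2); end
            if i < N, H(k(i,j), k(i+1,j)) = -exp( 1i*B*x*a/2); end
            if j > 1, H(k(i,j), k(i,j-1)) = -exp( 1i*B*y*a/2); end
            if j < N, H(k(i,j), k(i,j+1)) = -exp(-1i*B*y*a/2); end
        end
    end
    H = H / a^2;
end
\end{lstlisting}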
\subsubsection{(4.2) Boundary conditions: finite-size and periodic models}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.7\textwidth}
The expression presented in equation (\ref{hamiltonian}) already meets,
by construction, the \textit{finite-size} model boundary conditions: this is due to
the fact that the outer boundary terms are all discarded, i.e. implicitly set to $0$.
\par
On the other hand, the \textit{periodic} model needs a small correction to some of the Hamiltonian
terms. In particular, the boundary terms are connected to the opposite terms spatially mapped over the grid,
and the outer bound positions become well defined, as they wrap around to the opposite side (see figure (\ref{period_fig})).
In practice, the additional terms with respect to equation (\ref{hamiltonian}) are:
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.3\textwidth}
\includegraphics[width=\textwidth,keepaspectratio]{periodic.pdf}
\captionof{figure}{Periodic boundary}
\label{period_fig}
\end{minipage}
\vspace{0.5cm}
\end{minipage}
\begin{equation} \label{periodic}
\forall \; 1 \le i \le N:
\begin{cases}
a^2 \cdot \mathcal{\hat{H}}_{k_{i,1},k_{i,N}} &= \gamma_L^{(i1)} \\
a^2 \cdot \mathcal{\hat{H}}_{k_{i,N},k_{i,1}} &= \gamma_R^{(iN)}
\end{cases}
\quad \quad
\forall \;1 \le j \le N:
\begin{cases}
a^2 \cdot \mathcal{\hat{H}}_{k_{1,j},k_{N,j}} &= \gamma_D^{(1j)} \\
a^2 \cdot \mathcal{\hat{H}}_{k_{N,j},k_{1,j}} &= \gamma_U^{(Nj)}
\end{cases}
\end{equation}
Additionally, the hopping integrals must be periodic too; this means that, given
values of $B$ and $N$, not all values of $a$ are admitted, but only the ones
such that:
\begin{equation} \label{periodic_hop}
\exp\left(\frac{iBNa^2}{2}\right) = 1 \quad \implies \quad
a = \sqrt{\frac{4\pi}{BN}}
\end{equation}
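Continuing the sketch above (same assumed names), the corrections of equation (\ref{periodic}) and the admissible step of equation (\ref{periodic_hop}) read:
\begin{lstlisting}
a = sqrt(4*pi / (B*N));          % admissible lattice step
H = hamiltonian_sketch(N, B, a);
k = @(i,j) (i-1)*N + j;
for i = 1:N
    y = (i-1)*a;
    H(k(i,1), k(i,N)) = -exp( 1i*B*y*a/2) / a^2;   % gamma_L
    H(k(i,N), k(i,1)) = -exp(-1i*B*y*a/2) / a^2;   % gamma_R
end
for j = 1:N
    x = (j-1)*a;
    H(k(1,j), k(N,j)) = -exp(-1i*B*x*a/2) / a^2;   % gamma_D
    H(k(N,j), k(1,j)) = -exp( 1i*B*x*a/2) / a^2;   % gamma_U
end
\end{lstlisting}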
\subsubsection{(4.3) Energy spectra, $B = 0$}
This particular case can be solved analytically.
Equation (\ref{schroedinger_eq}), with $B = 0$, reduces to:
\begin{equation} \label{schroedinger_free}
- \vec{\nabla}^2 \psi(x,y) = E \psi(x,y) \iff
- \frac{\partial^2 \psi}{\partial x^2} - \frac{\partial^2 \psi}{\partial y^2} = E \psi(x,y)
\end{equation}
Applying a separation of variables $\psi(x,y) = f(x) \cdot g(y)$, an additional free real constant $\lambda$ appears
in the system and equation (\ref{schroedinger_free}) becomes:
\begin{equation} \label{free_solution}
\begin{cases}
f''(x) + \lambda f(x) &= 0 \\
g''(y) + (E - \lambda) g(y) &= 0
\end{cases}
\end{equation}
If the boundary conditions match the ones specified for the \textit{finite-size}
model, then $\lambda$ is a real value and the energy levels,
which depend on two positive integers $n$ and $m$, can be expressed as:
\begin{equation} \label{finite_size_energy}
E_{nm} = \frac{\pi^2}{L^2}(n^2 + m^2), \quad n,m \in \mathbb{N}^*
\end{equation}
The previous equation (\ref{finite_size_energy}) shows that the energy spectrum
is discretized; however, the graph in figure (\ref{dirich_B0_spectrum})
seems to show a continuous behaviour of the spectrum.
On the other hand, setting $L = 1$, the behaviour of the smallest eigenvalue is correct:
using relation (\ref{finite_size_energy}), the smallest value should be given by $E_{11} = 2\pi^2 \approx 19.739$,
and the values found computationally for $N = 25$ and $N = 50$ are respectively $19.715$ and $19.733$, which
suggests that the solution converges to the analytic one. Notice, in fact, that the second value differs from the expected one
at the precision of $10^{-3}$. This means that the spectrum in figure (\ref{dirich_B0_spectrum}) assumes
the discretized values given by equation (\ref{finite_size_energy}), but the evaluation range is so large that
the energies appear to be continuously distributed.
In all the plots, the eigenfunction is represented by its complex squared modulus, i.e. the
probability distribution of the particle position.
\begin{figure}[h!]
\hspace{-0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_dirichlet_B0_N25_fig.tex}
}
\caption{Lowest energy state plot}
\label{dirich_B0_plot}
\end{subfigure}
\hspace{0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_dirichlet_B0_N25_spectra.tex}
}
\caption{Energy spectra}
\label{dirich_B0_spectrum}
\end{subfigure}
\caption{Finite-size model, $B = 0$, $N = 25$}
\end{figure}
The \textit{periodic} model, differently from the previous case,
allows a periodic solution in both the $x$ and $y$ directions;
not being a \textit{bound state}, the spectrum is expected to be continuous,
and the minimal allowed value is $E = 0$. That is in fact the result shown
in figure (\ref{periodic_B0_plot}): the uniform vector (the vector composed only of $1$'s)
belongs to the kernel of the Hamiltonian matrix, so its corresponding eigenvalue is $0$.
\begin{figure}[h!]
\hspace{-0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_periodic_B0_N25_fig.tex}
}
\caption{Lowest energy state plot}
\label{periodic_B0_plot}
\end{subfigure}
\hspace{0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_periodic_B0_N25_spectra.tex}
}
\caption{Energy spectra}
\label{periodic_B0_spectrum}
\end{subfigure}
\caption{Periodic model, $B = 0$, $N = 25$}
\end{figure}
%Notice that physically a constant wave function is equivalent to a trivial solution, because of the symmetries of
%the \textit{Schrödinger} equation and because such a wave has a null energy, thus it describes the vacuum state.
\subsubsection{(4.4) Landau levels, periodic $B > 0$}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.45\textwidth}
When a small magnetic field is applied, such as $B = 0.05$ \si{T},
the state becomes periodic within the prefixed boundary.
In this case the \textit{Landau levels} appear in the energy spectrum as
discrete steps, as shown in the graph in figure (\ref{landau_energies}).
In the spectrum there is also a range of energy where the increase
is quasi-continuous, precisely between $E = 0.727$ \si{J} and $E = 0.865$ \si{J}.
Taking the wave-function plot for $E = 0.8$ \si{J} shown in figure (\ref{landau_transition}),
it can be noticed that the distribution of the peaks (yellow regions) is chaotic compared to
the ones situated at low or high energies.
Finally, the plot in figure (\ref{landau_stable}) shows a particular case of lower
energy, where the particle is localized in a circular region and its neighbourhood assumes a dune-like pattern.
The plot in figure (\ref{landau_lowest}) shows how the particle is distributed
in its lowest level $E = 0.04844$ \si{J}.
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.6\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_periodic_B0.05_N50_spectra.tex}
}
\captionof{figure}{Energy spectra for $B = 0.05$ \si{T}, $N = 50$, Periodic model}
\label{landau_energies}
\end{minipage}
\vspace{0.4cm}
\end{minipage}
The probability distribution is clearly periodic with a period of half the domain size, and
a circular tail caused by the magnetic field can be seen following the localized peak.
In this case $N$ was set to $50$ in order to better visualize the patterns.
\begin{figure}[h!]
\hspace{-0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_transition.tex}
}
\caption{$E = 0.8$ \si{J}, transition energies}
\label{landau_transition}
\end{subfigure}
\hspace{0.5cm}
\begin{subfigure}{0.55\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_stable.tex}
}
\caption{$E = 0.3121$ \si{J}, stable energy}
\label{landau_stable}
\end{subfigure}
\vspace{0.4cm}
\hspace{0.15\textwidth}
\begin{subfigure}{0.65\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_lowest.tex}
}
\caption{$E = 0.04844$ \si{J}, lowest energy, degeneracy $100$}
\label{landau_lowest}
\end{subfigure}
\caption{Periodic model, $B = 0.05$ \si{T}, $N = 50$}
\end{figure}
\subsubsection{(4.5) Hall edge states, finite-size $B > 0$}
\noindent
\begin{minipage}{\textwidth}
\begin{minipage}{0.45\textwidth}
Limiting now the system to the \textit{finite-size} model, the
eigenfunction is constrained to vanish on the boundary.
However, there exist some particular energy levels where the state
is also peaked around the boundary. These extreme states are known as
\textit{Hall edge} states. As the graph in figure (\ref{hall_energies}) demonstrates,
the energy levels are still the \textit{Landau levels}, the same as
described in figure (\ref{landau_energies}).
The difference between the two is that in the \textit{finite-size} model
the energy gaps are continuously connected; the \textit{Hall edge} states are precisely situated in these transitions.
Figure (\ref{hall_edge}) underlines this behaviour, as a reflection of the wave at the boundary, since
such a constraint can be interpreted as an infinitely high potential barrier.
\end{minipage}
\hspace{0.8cm}
\begin{minipage}{0.6\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/landau_dirichlet_B0.05_N50_spectra.tex}
}
\captionof{figure}{Energy spectra for $B = 0.05$ \si{T}, $N = 50$, Finite-size model}
\label{hall_energies}
\end{minipage}
\end{minipage}
\begin{figure}[h!]
\hspace{-1cm}
\begin{subfigure}{0.6\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/hall_edge_187.tex}
}
\caption{$E = 0.161$ \si{J}}
\label{hall_edge_low}
\end{subfigure}
\hspace{0.2cm}
\begin{subfigure}{0.6\textwidth}
\resizebox{\textwidth}{!}{
\input{graphs/hall_edge_679.tex}
}
\caption{$E = 0.537$ \si{J}}
\label{hall_edge_high}
\end{subfigure}
\caption{Finite-size model, $B = 0.05$ \si{T}, $N = 50$}
\label{hall_edge}
\end{figure}
\end{homeworkSection}
\end{homeworkProblem}
\section{Conclusion}
The proposed methods proved very effective
at computing a solution in the case of a time-independent quantum system.
However, the complexity of the \textit{Jacobi} method or of the \textit{LU decomposition}
is at least of the order of $\mathcal{O}(N^3)$, which
is very slow if the linear system to be solved is very large.
On the other hand, these methods are nowadays among the best known for general linear
problem solving, unless there is a specific symmetry that can be exploited instead,
and thus they remain the current solution of choice.
\newpage
\section{Appendix: matlab codes}
\matlabscript{solve}{Algorithm which solves a system}
\matlabscript{eig_power}{Power iteration method implementation}
\matlabscript{eig_ipower}{Inverse power iteration method implementation}
\matlabscript{eig_rq}{Rayleigh quotient iteration method implementation}
\matlabscript{eig_first}{Interval bisection strategy implementation}
\end{spacing}
\def\refname{D\MakeLowercase{ocumentation and sources}}
\begin{thebibliography}{99}
\bibitem{exo67}
Computational Physics III (PHYS-332) 2019–2020
O. Yazyev, K. Cernevics, Y. Guan, J. Colbois, J. Felisaz
Exercise sessions 6–7: LU decomposition and systems of linear equations.
\bibitem{exo89}
Computational Physics III (PHYS-332) 2019–2020
O. Yazyev, K. Cernevics, Y. Guan, J. Colbois, J. Felisaz
Exercise sessions 8–9: Diagonalization algorithms and eigenvalue problems.
\bibitem{spectral_thm}
\url{https://en.wikipedia.org/wiki/Spectral_theorem}
\bibitem{matrix_equiv}
\url{https://en.wikipedia.org/wiki/Matrix_equivalence}
\bibitem{SI_units}
\url{https://en.wikipedia.org/wiki/SI_base_unit}
\bibitem{exists_LU}
\url{https://math.stackexchange.com/questions/1274373/proof-for-existence-of-lu-decomposition}
\bibitem{inner_product}
\url{https://en.wikipedia.org/wiki/Inner_product_space}
\bibitem{schroedinger}
\url{http://galileo.phys.virginia.edu/classes/752.mf1i.spring03/ParticleMagneticField.htm}
\bibitem{plank}
\url{https://en.wikipedia.org/wiki/Planck_constant}
\bibitem{lightspeed}
\url{https://en.wikipedia.org/wiki/Speed_of_light}
\end{thebibliography}
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
