% 3_adaptive.tex
\documentclass[aspectratio=169]{beamer}
%\documentclass[aspectratio=169,handout]{beamer}
\def\stylepath{../styles}
\usepackage{\stylepath/com303}
\usepackage{pst-3dplot,pstricks-add,ifthen}
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% steepestdescent{a}{b}{x0}{y0}{step}{iterations}{view}{algo}:
% a, b: ellipsoid axes
% x0, y0, step, iterations: gradient descent initial position, learning step and iterations
% view: 3D or 2D (top view)
% algo: LS (least squares) or LMS (stochastic gradient descent)
%
\def\steepestdescent#1#2#3#4#5#6#7#8{%
% cartesian elliptic paraboloid z = f(x, y); expects to find [x y] on the stack
% add an offset to lift the paraboloid off the origin (it's an error surface)
\def\ellparC{#2 div dup mul exch #1 div dup mul add 0.2 add}
% parametric form (x, y, z) = f(theta, rho); expects [theta rho] on the stack
\def\ellparP{/r exch def dup cos #1 r mul mul exch sin #2 r mul mul r r mul 0.2 add}
% x and y updates in steepest descent: x -= stepsize * dz/dx
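% for LMS, add a small pseudo-random perturbation to each gradient component to mimic the
% noise of stochastic (sample-based) gradient estimates; PostScript's rand returns an integer
% in [0, 2^31-1], so the perturbation below lies in [-0.24, 0.06]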
\ifthenelse{\equal{#8}{LMS}}{\def\rndp{rand 2147483647 div 0.8 sub 0.3 mul}}{\def\rndp{0}}
\def\gradx{ pop dup -2 #5 mul mul #1 dup mul div add \rndp\space add}
\def\grady{exch pop dup -2 #5 mul mul #2 dup mul div add \rndp\space add}
%
% center plot in psbox according to view
\ifthenelse{\equal{#7}{2D}}{\def\la{-2.6}\def\lb{3}}{\def\la{-1}\def\lb{5}}
\begin{pspicture}(-3,\la)(3,\lb)
\ifthenelse{\equal{#7}{2D}}{%
\psset{Beta=90}\psset{Alpha=180}}{
\psset{Beta=30}\psset{Alpha=150}}
\pstThreeDCoor[linecolor=darkgray,%
xMin=-2.5,xMax=2.5,nameX={$h_1$},
yMin=-2.5,yMax=2.5,nameY={$h_0$},
zMin=0,zMax=3,nameZ={}]
% plot the level sets
\parametricplotThreeD[xPlotpoints=180,yPlotpoints=20,linecolor=red,linewidth=0.5pt](0,360)(0, 2){%
t u \ellparP}
\parametricplotThreeD[xPlotpoints=180,yPlotpoints=17,linecolor=red,linewidth=0.5pt](0, 2)(0,360){%
u t \ellparP}
% gradient descent
\pstVerb{/xx #3 def /yy #4 def /xn xx yy \gradx\space def /yn xx yy \grady\space def}
\multido{\nA=0+1}{#6}{%
\pstThreeDLine[linecolor=blue,arrows=->](xx, yy, xx yy \ellparC)(xn, yn, xn yn \ellparC\space)%
\pstVerb{/xx xn def /yy yn def /xn xx yy \gradx\space def /yn xx yy \grady\space def}}
\end{pspicture}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame} \frametitle{Adaptive signal processing}
\centering
\begin{dspBlocks}{2}{1}
$x[n]~$ & \BDfilter{$H(z)$} & \BDsplit & \\
& & \BDsub & $~~~d[n]$ %
\psset{arrows=->}
\ncline{1,1}{1,2}\ncline{-}{1,2}{1,3}\naput{$\hat{d}[n]$}\ncline{1,3}{1,4}
\ncline{1,3}{2,3}
\ncline{2,4}{2,3}
\ncangle[angleA=180,angleB=-90,linewidth=\BDwidth]{2,3}{1,2}\naput{$e[n]$}
\end{dspBlocks}
\vspace{1em}
\begin{itemize}
\item $x[n]$: non-deterministic (unknown) input
\item $H(z)$: adaptive filter
\item $\hat{d}[n] = (x \ast h)[n]$: filter's output
\item $d[n]$: desired (target) output
\item $e[n] = d[n] - \hat{d}[n]$: error signal
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Adaptive filters}
\centering
how can we find the filter's coefficients?
\vspace{2em}
\begin{itemize}
\item adapt the coefficients to minimize a \textit{cost function}
\item cost function depends on error signal $e[n]$
\item it's not realistic to require $e[n] = 0$ for all $n$...
\item ... but we can try to minimize the power of the error!
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Mean Squared Error (MSE)}
\centering
over a finite-length analysis window:
\[
P_{e,N} = \hlBox{a}{green!30}{\displaystyle\frac{1}{2N+1}\sum_{n=-N}^{N}}\hlBox[\displaystyle\sum_{n=-N}^{N}]{b}{red!30}{e^2[n]}
\]
\vspace{2em}
\hspace{2em}\rnode{A}{\psframebox[linestyle=none]{mean}}
\hspace{2em}\rnode{B}{\psframebox[linestyle=none]{squared error}}
\nccurve[linecolor=darkred,angleA=90,angleB=-90]{->}{A}{a}
\nccurve[linecolor=darkred,angleA=90,angleB=-90]{->}{B}{b}
\vspace{1em}
\begin{itemize}
\item $P_{e,N}$ is the power of the error over $[-N, \ldots, N]$
\item in the limit, minimize the expected power of the error signal:
\[
\lim_{N\rightarrow\infty}P_{e,N} = P_e = \expt{|e[n]|^2}
\]
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Optimal adaptive filter}
\centering
the optimal filter $H(z)$ \textit{minimizes} the Mean Squared Error
\[
H_{\text{opt}}(z) = \underset{H(z)}{\operatorname{arg}\,\operatorname{min}}\;\{\expt{|e[n]|^2}\}
\]
\vspace{2em}
Advantages of a squared error measure:
\begin{itemize}
\item a minimum always exists
\item error easily differentiable
\item output will be orthogonal to error
\item only need second moments!
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Just FIR adaptive filters for us}
\centering
We will only consider FIR adaptive filters:
\[
\hat{d}[n] = \sum_{k=0}^{M-1}h[k]x[n-k]
\]
\end{frame}
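% A minimal NumPy sketch (not part of the original deck) of the FIR output
% $\hat{d}[n] = \sum_k h[k]x[n-k]$; the function name and array arguments are illustrative.
\begin{frame}[fragile] \frametitle{FIR filter output: a numerical sketch}
{\footnotesize a minimal sketch, assuming the coefficients \texttt{h} and the input \texttt{x} are NumPy arrays:}
\begin{verbatim}
import numpy as np

def fir_output(h, x):
    """d_hat[n] = sum_k h[k] x[n-k], with zero initial conditions."""
    # full convolution, truncated to the length of the input signal
    return np.convolve(x, h)[:len(x)]
\end{verbatim}
\end{frame}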
\begin{frame} \frametitle{Finding the minimum squared error}
Two cases:
\begin{itemize}
\item for WSS signals, one-shot solution: Optimal Least Squares
\item for ``almost'' WSS signals, iterative solutions: stochastic gradient descent or LMS
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Optimal Least Squares}
\centering
\[
e[n] = d[n] - \sum_{k=0}^{M-1}h[k]x[n-k]
\]
\pause
\vspace{1em}
Minimum is found by setting all partial derivatives to zero
\begin{align*}
\frac{\partial \expt{e^2[n]}}{\partial h[i]} &= 2\expt{e[n] \, \frac{\partial e[n]}{\partial h[i]}} \\ \pause
&= -2\expt{e[n] \, x[n-i]} = 0
\end{align*}
\end{frame}
\begin{frame} \frametitle{Orthogonality principle}
\centering
\[
\expt{e[n] \, x[n-i]} = 0
\]
\vspace{1em}
error is orthogonal to all input values we used: \\
all useful information has been extracted!
\end{frame}
\begin{frame} \frametitle{Optimal Least Squares}
\begin{align*}
e[n] &= d[n] - \sum_{k=0}^{M-1}h[k]x[n-k] \\[1em] \pause
\frac{1}{2}\,\frac{\partial \expt{e^2[n]}}{\partial h[i]} &= -\expt{e[n] \, x[n-i]} \\ \pause
&= \expt{\displaystyle \sum_{k=0}^{M-1}h[k]x[n-k]x[n-i]} - \expt{d[n]x[n-i]} \\ \pause
&= \sum_{k=0}^{M-1}h[k]r_x[i-k] - r_{dx}[i] \qquad \mbox{\it (WSS signals)}
\end{align*}
\end{frame}
\begin{frame} \frametitle{Optimal Least Squares}
\centering
setting all partial derivatives to zero:
\[
\sum_{k=0}^{M-1}h[k]r_x[i-k] = r_{dx}[i]
\]
\vspace{2em}
in matrix form:
\[
\mathbf{Rh = g}
\]
\end{frame}
\begin{frame} \frametitle{Optimal Least Squares solution in matrix form}
\begin{align*}
\mathbf{h} &= \mathbf{R}^{-1}\mathbf{g} \\ \\
\mathbf{h} &= \begin{bmatrix}
h[0] & h[1] & h[2] & \ldots & h[M-1]
\end{bmatrix}^T \\
\mathbf{R} &= \begin{bmatrix}
r_x[0] & r_x[1] & r_x[2] & \ldots & r_x[M-1] \\
r_x[1] & r_x[0] & r_x[1] & \ldots & r_x[M-2] \\
r_x[2] & r_x[1] & r_x[0] & \ldots & r_x[M-3] \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
r_x[M-1] & r_x[M-2]& \ldots & \ldots & r_x[0]
\end{bmatrix} \\
\mathbf{g} &= \begin{bmatrix}
r_{dx}[0] & r_{dx}[1] & r_{dx}[2] & \ldots & r_{dx}[M-1]
\end{bmatrix}^T
\end{align*}
\end{frame}
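% A hedged NumPy/SciPy sketch (not part of the original deck) of the normal equations
% $\mathbf{Rh}=\mathbf{g}$; the function name and the argument names rx, rdx are illustrative.
\begin{frame}[fragile] \frametitle{Solving the normal equations: a numerical sketch}
{\footnotesize a minimal sketch, assuming \texttt{rx} holds $r_x[0],\ldots,r_x[M-1]$ and \texttt{rdx} holds $r_{dx}[0],\ldots,r_{dx}[M-1]$:}
\begin{verbatim}
import numpy as np
from scipy.linalg import toeplitz

def optimal_filter(rx, rdx):
    """Solve R h = g for the optimal MSE filter."""
    R = toeplitz(rx)              # symmetric Toeplitz autocorrelation matrix
    return np.linalg.solve(R, np.asarray(rdx))
\end{verbatim}
\end{frame}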
\begin{frame} \frametitle{Intuition}
\begin{itemize}
\item the optimal MSE filter depends only on auto- and cross-correlations
\item correlations express power spectral distributions
\item PSDs are ``invariant'' descriptors, robust to changes in signal shape
\item we don't need to know the signals exactly since we implicitly use their PSD only
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Let's look at the MSE again (I)}
\begin{align*}
e[n] &= d[n] - \hat{d}[n] \\
\hat{d}[n] &= \sum_{m=0}^{M-1}h[m] x[n-m] \\
&= \begin{bmatrix}
h[0] & h[1] & h[2] & \ldots & h[M-1]
\end{bmatrix}
\begin{bmatrix}
x[n] \\ x[n-1] \\ x[n-2] \\ \vdots \\ x[n-M+1]
\end{bmatrix} \\
&= \mathbf{h}^T\mathbf{x}_n
\end{align*}
\end{frame}
\begin{frame} \frametitle{Let's look at the MSE again (II)}
\begin{align*}
e^2[n] &= (d[n] - \hat{d}[n])^2 \\
&= d^2[n] - 2d[n] \mathbf{h}^T\mathbf{x}_n + (\mathbf{h}^T\mathbf{x}_n)^2 \\
&= d^2[n] - 2 \mathbf{h}^T (d[n]\mathbf{x}_n) + \mathbf{h}^T\mathbf{x}_n\mathbf{x}^T_n \mathbf{h}\\
&= d^2[n] - 2 \mathbf{h}^T \mathbf{g}_n + \mathbf{h}^T\mathbf{X}_n\mathbf{h}
\end{align*}
\footnotesize
\begin{align*}
\mathbf{g}_n &= \begin{bmatrix}
d[n]x[n] & d[n]x[n-1] & \ldots & d[n]x[n-M+1]
\end{bmatrix}^T\\
\mathbf{X}_n &= \begin{bmatrix*}[l]
x^2[n] & x[n]x[n-1] & x[n]x[n-2] & \ldots & x[n]x[n-M+1] \\
x[n-1]x[n] & x[n-1]x[n-1] & x[n-1]x[n-2] & \ldots & x[n-1]x[n-M+1] \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
x[n-M+1]x[n] & x[n-M+1]x[n-1] & \ldots & \ldots & x[n-M+1]x[n-M+1]
\end{bmatrix*}
\end{align*}
\end{frame}
\begin{frame} \frametitle{Let's look at the MSE again (III)}
\begin{align*}
P_{e}(\mathbf{h}) &= \expt{e^2[n]} \\
&= \expt{d^2[n]}+ \expt{\mathbf{h}^T \mathbf{X}_n \mathbf{h}} - 2 \expt{\mathbf{h}^T \mathbf{g}_n} \\
&= r_d[0] + \mathbf{h}^T \mathbf{R} \mathbf{h} - 2 \mathbf{h}^T \mathbf{g}
\end{align*}
\vspace{1em}
if we plug in the optimal solution $\mathbf{h}_{\text{opt}} = \mathbf{R}^{-1}\mathbf{g}$ we obtain the minimum achievable MSE:
\[
P_e(\mathbf{h}_{\text{opt}}) = r_d[0] - \mathbf{g}^T\mathbf{R}^{-1}\mathbf{g}
\]
\end{frame}
\begin{frame} \frametitle{Error surface}
\only<2>{\hspace{5em}\rnode[lc]{T1}{power of the desired signal}}
\only<3>{\hspace{2em}\rnode[lc]{T2}{quadratic form}}
\[
P_e(\mathbf{h}) = \only<2>{\hlBox{t1}{red!30}}{r_d[0]} + \only<3>{\hlBox{t2}{green!30}}{\mathbf{h}^T\mathbf{Rh} - 2\mathbf{h}^T\mathbf{g}}
\]
\only<2>{\nccurve[linecolor=green!30,angleA=0,angleB=105]{->}{T1}{t1}}
\only<3>{\nccurve[linecolor=green!30,angleA=0,angleB=105]{->}{T2}{t2}}
\only<4->{
\vspace{2em}
error surface is an elliptic paraboloid:
\begin{itemize}
\item axes inversely proportional to square roots of $\mathbf{R}$'s eigenvalues
\item input autocorrelation determines the shape of the error surface
\end{itemize}}
\end{frame}
\begin{frame} \frametitle{Error surface for $M=2$}
\begin{align*}
e[n] &= d[n] - (h[0]x[n] + h[1]x[n-1]) \\
&= d[n] - \begin{bmatrix}h[0] & h[1]\end{bmatrix} \begin{bmatrix}x[n] \\ x[n-1]\end{bmatrix} \\ \\
P_e(\mathbf{h}) &= r_d[0] + \mathbf{h}^T \begin{bmatrix}r_x[0] & r_x[1] \\ r_x[1] & r_x[0]\end{bmatrix} \mathbf{h} - 2\mathbf{h}^T \begin{bmatrix}r_{dx}[0] \\ r_{dx}[1]\end{bmatrix}
\end{align*}
\end{frame}
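% A hedged NumPy sketch (not part of the original deck) that evaluates the $M=2$ error surface
% on a grid, as the PSTricks macro above does graphically; rd0, R, g and the grid limits are
% illustrative inputs.
\begin{frame}[fragile] \frametitle{Error surface for $M=2$: a numerical sketch}
{\footnotesize a minimal sketch of $P_e(\mathbf{h}) = r_d[0] + \mathbf{h}^T\mathbf{Rh} - 2\mathbf{h}^T\mathbf{g}$ over a grid of $(h[0], h[1])$ values:}
\begin{verbatim}
import numpy as np

def error_surface(rd0, R, g, lim=2.5, num=101):
    """Evaluate P_e(h) on a num-by-num grid of h = (h[0], h[1])."""
    h0, h1 = np.meshgrid(np.linspace(-lim, lim, num), np.linspace(-lim, lim, num))
    H = np.stack([h0.ravel(), h1.ravel()])          # 2 x num^2 matrix of grid points
    P = rd0 + (H * (R @ H)).sum(axis=0) - 2 * (np.asarray(g) @ H)
    return h0, h1, P.reshape(h0.shape)
\end{verbatim}
\end{frame}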
\begin{frame} \frametitle{Error surface for white noise input}
\centering
$r_x[k] = \delta[k], \qquad \mathbf{R} = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}$
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{0}{3D}{LS}} &
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{0}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Error surface for correlated input}
\centering
$\mathbf{R} = \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix}$
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{0.4}{-1.3}{1.3}{0.1}{0}{3D}{LS}} &
{\steepestdescent{1}{0.4}{-1.3}{1.3}{0.1}{0}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{So, how is it done in practice?}
\begin{itemize}
\item the optimal solution requires $M$ values of auto- and cross-correlations
\item in practice we can only compute sample correlations using the data we have:
\[
r_x[k] \leftarrow \hat{r}_x[k] = \frac{1}{N}\sum_{n=0}^{N-1-|k|}x^*[n+|k|]x[n]
\]
\item this requires that we first collect input data
\item we should have $N > 4M$
\item usually operate on successive data segments
\end{itemize}
\end{frame}
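% A hedged NumPy sketch (not part of the original deck) of the biased sample autocorrelation
% estimate on the previous slide; the function name and signature are illustrative.
\begin{frame}[fragile] \frametitle{Sample autocorrelation: a numerical sketch}
{\footnotesize a minimal sketch of $\hat{r}_x[k] = \frac{1}{N}\sum_{n} x^*[n+k]x[n]$ for lags $k = 0, \ldots, M-1$:}
\begin{verbatim}
import numpy as np

def sample_autocorrelation(x, M):
    """Biased estimate r_hat[k] = (1/N) sum_n conj(x[n+k]) x[n], k = 0 .. M-1."""
    N = len(x)
    return np.array([np.sum(np.conj(x[k:]) * x[:N - k]) / N for k in range(M)])
\end{verbatim}
\end{frame}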
\begin{frame} \frametitle{Example: linear prediction coding of speech}
\centering
\includegraphics[height=70mm]{lpc.eps}
\end{frame}
\begin{frame} \frametitle{All-pole models}
\[
H(z) = \frac{1}{A(z)} = \frac{1}{1 - a_1 z^{-1} - a_2 z^{-2} - \ldots - a_M z^{-M}}
\]
\vspace{2em}
\begin{itemize}
\item poles model natural resonances of physical systems
\item the model is also called autoregressive (the filter is purely recursive)
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Estimating an all-pole model}
\centering
\begin{dspBlocks}{1}{1}
$e[n]~~$ & \BDfilter{$1/A(z)$} & $~~~x[n]$
\psset{arrows=->}
\ncline{1,1}{1,2}\ncline{1,2}{1,3}
\end{dspBlocks}
\vspace{2em}
\only<2>{
\begin{itemize}
\item $e[n]$: unknown excitation
\item $x[n]$: observable signal
\item can we determine $A(z)$?
\end{itemize}}
\end{frame}
\begin{frame} \frametitle{Linear Prediction}
\begin{align*}
X(z) &= E(z)/A(z) \\ \pause
E(z) &= X(z)A(z) \\ \\ \pause
e[n] &= x[n] - \sum_{k=1}^{M} a_k x[n-k]
\end{align*}
\end{frame}
\begin{frame} \frametitle{Remember the optimal Least Squares solution...}
\[
e[n] = d[n] - \sum_{k=0}^{M-1}h[k]x[n-k]
\]
\end{frame}
\begin{frame} \frametitle{Linear Prediction}
\[
e[n] = x[n] - \sum_{k=1}^{M} a_k x[n-k]
\]
\vspace{1em}
\begin{itemize}
\item we shouldn't be able to predict excitation $e[n]$
\item excitation and prediction should be orthogonal
\item Least Squares solution is \textit{the} solution
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Linear Prediction}
by setting $\partial \expt{e^2[n]}/\partial a_i$ to zero...
\[
\mathbf{R\hat{a} = r}
\]
\[
\begin{bmatrix}
r_x[0] & r_x[1] & \ldots & r_x[M-1] \\
r_x[1] & r_x[0] & \ldots & r_x[M-2] \\
\vdots & \vdots & \ddots & \vdots \\
r_x[M-1] & r_x[M-2]& \ldots & r_x[0]
\end{bmatrix}
\begin{bmatrix}
\hat{a}_1 \\ \hat{a}_2 \\ \vdots \\ \hat{a}_M
\end{bmatrix} =
\begin{bmatrix}
r_x[1] \\ r_x[2] \\ \vdots \\ r_x[M]
\end{bmatrix}.
\]
\end{frame}
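% A hedged NumPy/SciPy sketch (not part of the original deck) of the linear prediction normal
% equations above, using the biased sample autocorrelation of one frame; the function name lpc
% and its signature are illustrative.
\begin{frame}[fragile] \frametitle{Linear prediction coefficients: a numerical sketch}
{\footnotesize a minimal sketch, assuming \texttt{x} is one (quasi-stationary) frame of the signal:}
\begin{verbatim}
import numpy as np
from scipy.linalg import toeplitz

def lpc(x, M):
    """Estimate the order-M all-pole coefficients a_1 .. a_M from frame x."""
    N = len(x)
    r = np.array([np.sum(x[k:] * x[:N - k]) / N for k in range(M + 1)])  # r_x[0..M]
    return np.linalg.solve(toeplitz(r[:M]), r[1:])   # R a = [r_x[1] ... r_x[M]]^T
\end{verbatim}
\end{frame}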
\begin{frame} \frametitle{LPC speech coding}
\begin{itemize}
\item segment the speech into 20\,ms chunks (approximately stationary)
\item find the coefficients for an all-pole model
\item inverse filter and find the residual
\item classify the residual excitation as voiced/unvoiced
\end{itemize}
\end{frame}
\begin{frame} \frametitle{LPC order selection}
\centering
\begin{dspPlot}[xtype=freq,xticks=none,yticks=none]{0, 0.7}{0, 30}
\dspCustomTicks{0 $0$ 0.7 $0.7\pi$}
\dspFuncData[linewidth=0.5pt]{0.000 1.032 0.003 3.946 0.006 1.522 0.009 3.315 0.013 9.486 0.016 28.335 0.019 7.367 0.022 5.142 0.025 7.885 0.028 17.662 0.031 24.412 0.034 8.163 0.037 4.155 0.041 3.531 0.044 11.966 0.047 9.724 0.050 6.683 0.053 5.400 0.056 6.014 0.059 24.546 0.062 4.555 0.066 0.697 0.069 4.168 0.072 11.327 0.075 16.578 0.078 4.988 0.081 2.965 0.084 4.597 0.087 6.698 0.091 6.949 0.094 1.626 0.097 1.480 0.100 3.226 0.103 5.024 0.106 2.831 0.109 2.091 0.113 0.326 0.116 2.605 0.119 5.985 0.122 1.631 0.125 1.210 0.128 0.516 0.131 1.910 0.134 3.508 0.138 0.689 0.141 0.984 0.144 1.490 0.147 3.292 0.150 3.718 0.153 1.573 0.156 0.915 0.159 1.669 0.163 2.744 0.166 1.799 0.169 2.157 0.172 1.576 0.175 4.013 0.178 6.629 0.181 1.634 0.184 2.229 0.188 2.203 0.191 4.141 0.194 6.570 0.197 3.931 0.200 3.150 0.203 1.583 0.206 7.722 0.209 12.751 0.212 9.065 0.216 2.051 0.219 1.209 0.222 4.332 0.225 2.593 0.228 5.901 0.231 6.797 0.234 0.700 0.237 2.986 0.241 6.158 0.244 5.257 0.247 2.083 0.250 2.485 0.253 2.002 0.256 1.170 0.259 1.893 0.263 1.858 0.266 3.603 0.269 4.275 0.272 4.068 0.275 1.357 0.278 1.043 0.281 1.866 0.284 1.218 0.287 2.384 0.291 2.011 0.294 1.664 0.297 1.731 0.300 3.189 0.303 4.059 0.306 3.623 0.309 3.590 0.312 2.637 0.316 2.839 0.319 3.392 0.322 3.139 0.325 4.262 0.328 4.209 0.331 6.186 0.334 4.408 0.338 1.870 0.341 3.318 0.344 1.764 0.347 2.931 0.350 1.830 0.353 0.914 0.356 1.243 0.359 0.891 0.362 1.771 0.366 1.737 0.369 1.381 0.372 0.673 0.375 0.820 0.378 1.058 0.381 1.693 0.384 1.248 0.388 0.307 0.391 1.554 0.394 1.075 0.397 1.398 0.400 1.256 0.403 0.054 0.406 1.451 0.409 2.293 0.412 0.960 0.416 1.048 0.419 0.875 0.422 1.148 0.425 1.857 0.428 0.902 0.431 1.045 0.434 0.663 0.438 2.455 0.441 4.253 0.444 2.403 0.447 1.411 0.450 1.962 0.453 2.467 0.456 4.276 0.459 2.663 0.463 0.642 0.466 2.873 0.469 4.175 0.472 4.425 0.475 2.058 0.478 0.394 0.481 3.268 0.484 3.816 0.487 0.949 0.491 1.008 0.494 0.411 0.497 1.317 0.500 2.771 0.503 0.557 0.506 0.668 0.509 1.220 0.512 1.730 0.516 1.706 0.519 0.297 0.522 0.179 0.525 0.317 0.528 0.752 0.531 1.101 0.534 0.575 0.537 0.093 0.541 0.755 0.544 0.973 0.547 0.631 0.550 0.347 0.553 0.474 0.556 0.146 0.559 0.871 0.562 0.410 0.566 0.165 0.569 0.912 0.572 0.760 0.575 1.178 0.578 0.139 0.581 0.637 0.584 0.297 0.588 0.394 0.591 0.289 0.594 0.364 0.597 0.370 0.600 0.270 0.603 0.490 0.606 0.704 0.609 0.340 0.613 0.234 0.616 0.162 0.619 0.317 0.622 0.237 0.625 0.213 0.628 0.158 0.631 0.282 0.634 0.645 0.637 0.064 0.641 0.240 0.644 0.156 0.647 0.187 0.650 0.413 0.653 0.167 0.656 0.249 0.659 0.172 0.662 0.283 0.666 0.004 0.669 0.214 0.672 0.179 0.675 0.156 0.678 0.271 0.681 0.144 0.684 0.185 0.688 0.129 0.691 0.221 0.694 0.096 0.697 0.171 0.700 0.197}
\dspFunc[linecolor=dspColorTwo]{x \dspTFM{1}{1 -2.243 2.556 -1.809 0.429 0.545 -0.66 0.183 0.099 -0.113 0.066}}
\only<2->{\dspFunc[linecolor=dspColorThree]{x \dspTFM{1}{1 -2.179 2.414 -1.679 0.363 0.562 -0.705 0.196 0.187 -0.288 0.235 -0.058 -0.081 0.095 0.124 -0.136 0.068 -0.062 -0.031 0.043
-0.017}}}
\only<3->{\dspFunc[linecolor=dspColorFour]{x \dspTFM{1}{1 -2.172 2.4 -1.665 0.36 0.556 -0.692 0.174 0.189 -0.292 0.252 -0.067 -0.074 0.086 0.13 -0.12 0.063 -0.081 0.015 -0.031
0.087 -0.138 0.145 -0.141 0.032 0.067 -0.029 -0.059 0.123 -0.112 0.058}}}
\dspLegend(0.4, 25){dspColorTwo {LPC 10} dspColorThree {LPC 20} dspColorFour {LPC 30}}
\end{dspPlot}
\end{frame}
\begin{frame} \frametitle{LPC speech coding}
\begin{itemize}
\item normally $M=20$
\item average bitrate 4\,kbit/s (raw data: 48\,kbit/s)
\item many improvements exist: CELP \& Co
\end{itemize}
\vspace{2em}
\centering
\movie[inlinesound]{\fbox{\small{original}}}{speech2.wav}
\hspace{5em}
\movie[inlinesound]{\fbox{\small{LPC-coded}}}{lpc9.6.wav}
\end{frame}
\intertitle{gradient descent}
\begin{frame} \frametitle{Error minimization by gradient descent}
\begin{itemize}
\item MSE minimization leads to a quadratic cost function; in that case (and in that case only) the location of the minimum is available in closed form as $\mathbf{h} = \mathbf{R}^{-1}\mathbf{g}$
\item most machine learning problems involve more complicated loss functions
\item how can we try to find a minimum in these cases?
\end{itemize}
\centering
\vspace{2em}
iteratively, via gradient descent
\end{frame}
\begin{frame} \frametitle{Gradient descent}
problem setup:
\begin{itemize}
\item assume $f(\mathbf{t})$ is a differentiable multivariate function ($\mathbf{t} = \begin{bmatrix}t_0 & t_1 & \ldots & t_D\end{bmatrix}$)
\item its gradient in $\mathbf{t}$ is the vector:
\[
\nabla f(\mathbf{t}) = \begin{bmatrix}
\displaystyle \frac{\partial f(\mathbf{t})}{\partial t_0} &
\displaystyle \frac{\partial f(\mathbf{t})}{\partial t_1} & \ldots &
\displaystyle \frac{\partial f(\mathbf{t})}{\partial t_{D}}
\end{bmatrix}^T
\]
\end{itemize}
\vspace{1em}
to find a (local) minimum with the gradient descent algorithm:
\begin{itemize}
\item start with a guess $\mathbf{t}_{0}$ for the location of the minimum
\item iteratively update the guess by moving in the direction of steepest descent
\[
\mathbf{t}_{n+1} = \mathbf{t}_{n} - \alpha_n \, \nabla f(\mathbf{t}_{n})
\]
\item the learning factor $\alpha_n < 1$ is a ``brake'' to prevent overshoots
\end{itemize}
\end{frame}
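% A hedged Python sketch (not part of the original deck) of the plain gradient descent iteration
% above; the function names gradient_descent and grad_f, the constant step size and the example
% values of R and g are illustrative assumptions.
\begin{frame}[fragile] \frametitle{Gradient descent: a numerical sketch}
{\footnotesize a minimal sketch of the update $\mathbf{t}_{n+1} = \mathbf{t}_n - \alpha\,\nabla f(\mathbf{t}_n)$:}
\begin{verbatim}
import numpy as np

def gradient_descent(grad_f, t0, alpha=0.1, iterations=100):
    """Iterate t <- t - alpha * grad_f(t) starting from the guess t0."""
    t = np.asarray(t0, dtype=float)
    for _ in range(iterations):
        t = t - alpha * grad_f(t)
    return t

# example: quadratic MSE surface, where grad P_e(h) = 2 (R h - g)
R = np.array([[2.0, 1.0], [1.0, 2.0]])
g = np.array([1.0, 0.5])
h = gradient_descent(lambda h: 2 * (R @ h - g), t0=[-1.3, 1.3])
\end{verbatim}
\end{frame}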
\begin{frame} \frametitle{Gradient descent for nonconvex cost function}
\centering
\includegraphics[height=70mm]{errorsurf.eps}
\end{frame}
\begin{frame} \frametitle{Gradient descent for MSE}
\begin{itemize}
\item for a quadratic error surface, minimum is always global
\item gradient is easy to compute:
\begin{align*}
\nabla P_e(\mathbf{h}) &= \begin{bmatrix}
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[0]} &
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[1]} & \ldots &
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[M-1]}
\end{bmatrix}^T \\
& = 2(\mathbf{Rh - g})
\end{align*}
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Steepest descent for white input}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{15}{3D}{LS}} &
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{15}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Steepest descent for white input}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{1}{2}{0.2}{0.1}{15}{3D}{LS}} &
{\steepestdescent{1}{1}{2}{0.2}{0.1}{15}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Error surface for correlated input: good guess}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{0.4}{-2}{0}{0.1}{12}{3D}{LS}} &
{\steepestdescent{1}{0.4}{-2}{0}{0.1}{12}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Error surface for correlated input: so-so guess}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{0.4}{-1.1}{0.5}{0.15}{15}{3D}{LS}} &
{\steepestdescent{1}{0.4}{-1.1}{0.5}{0.15}{15}{2D}{LS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Error surface for correlated input: learning factor too large!}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{0.4}{-1.1}{0.5}{0.17}{5}{3D}{LS}} &
{\steepestdescent{1}{0.4}{-1.1}{0.5}{0.17}{15}{2D}{LS}}
\end{tabular}
\end{frame}
\intertitle{stochastic gradient descent}
\begin{frame} \frametitle{Iterative minimization}
\begin{itemize}[<+->]
\item for WSS signals, one-shot and iterative are the same
\item for time-varying signals, we need to follow the changes: iterative solution
\item computation of time-varying correlations is costly
\item \textit{stochastic} gradient descent:
\[
\expt{e^2[n]} \, \leftarrow \, e^2[n]
\]
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Stochastic gradient descent}
\[
\nabla P_e = \begin{bmatrix}
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[0]} &
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[1]} & \ldots &
\displaystyle \frac{\partial \expt{e^2[n]}}{\partial h[M-1]}
\end{bmatrix}^T
\]
\end{frame}
\begin{frame} \frametitle{Stochastic gradient descent}
\[
\nabla e^2[n] = \begin{bmatrix}
\displaystyle \frac{\partial e^2[n]}{\partial h[0]} &
\displaystyle \frac{\partial e^2[n]}{\partial h[1]} & \ldots &
\displaystyle \frac{\partial e^2[n]}{\partial h[M-1]}
\end{bmatrix}^T
\]
\end{frame}
\begin{frame} \frametitle{Stochastic gradient descent}
\[
e[n] = d[n] - \sum_{k=0}^{M-1}h[k]x[n-k]
\]
\pause
\[
\frac{\partial e^2[n]}{\partial h[i]} = -2e[n]\,x[n-i].
\]
\pause
\vspace{2em}
\[
\nabla e^2[n] = -2e[n]\, \mathbf{x_n}
\]
\[
\mathbf{x}_n = \begin{bmatrix}
x[n] & x[n-1] & x[n-2] & \ldots & x[n - M + 1]
\end{bmatrix}^T.
\]
\end{frame}
\begin{frame} \frametitle{The LMS adaptive filter}
\begin{itemize}
\item start with an initial guess for the filter coefficients:
\[
\mathbf{h}_0 = \begin{bmatrix}h_0[0] & h_0[1] & \ldots & h_0[M-1]\end{bmatrix}^T
\]
\item for each new input sample $x[n]$:
\begin{itemize}
\item collect $\mathbf{x}_n = \begin{bmatrix} x[n] & x[n-1] & x[n-2] & \ldots & x[n - M + 1] \end{bmatrix}^T$
\item compute the instantaneous error $e[n] = d[n] - \mathbf{h}_n^T\mathbf{x}_n$
\item update the filter coefficients using $\nabla e^2[n] = -2e[n]\mathbf{x}_n$
\end{itemize}
\end{itemize}
\vspace{1em}
\begin{align*}
e[n] &= d[n] - \mathbf{h}_n^T\mathbf{x}_n \\
\mathbf{h}_{n+1} &= \mathbf{h}_n + \alpha_n\, e[n] \, \mathbf{x}_n
\end{align*}
\end{frame}
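% A hedged NumPy sketch (not part of the original deck) of the LMS recursion on the previous
% slide; the function name lms, the zero initialization and the constant step size alpha are
% illustrative choices.
\begin{frame}[fragile] \frametitle{The LMS recursion: a numerical sketch}
{\footnotesize a minimal sketch, assuming \texttt{x} and \texttt{d} are NumPy arrays of equal length:}
\begin{verbatim}
import numpy as np

def lms(x, d, M, alpha=0.1):
    """Adapt an M-tap FIR filter so that h^T x_n tracks d[n]; return (h, e)."""
    h = np.zeros(M)                        # initial guess h_0
    e = np.zeros(len(x))
    for n in range(M - 1, len(x)):
        xn = x[n - M + 1:n + 1][::-1]      # x_n = [x[n], x[n-1], ..., x[n-M+1]]
        e[n] = d[n] - h @ xn               # instantaneous error
        h = h + alpha * e[n] * xn          # h_{n+1} = h_n + alpha e[n] x_n
    return h, e
\end{verbatim}
\end{frame}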
\begin{frame} \frametitle{LMS for white input}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{25}{3D}{LMS}} &
{\steepestdescent{1}{1}{-1.3}{1.3}{0.1}{25}{2D}{LMS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{LMS for correlated input}
\centering
\psset{unit=10mm}
\begin{tabular}{cc}
{\steepestdescent{1}{0.4}{-2}{0}{0.1}{25}{3D}{LMS}} &
{\steepestdescent{1}{0.4}{-2}{0}{0.1}{25}{2D}{LMS}}
\end{tabular}
\end{frame}
\begin{frame} \frametitle{Analysis of the LMS filter}
\begin{itemize}
\item algorithm is extremely simple and low-cost
\item it works remarkably well
\item it keeps adapting all the time: can handle changing conditions
\item used in almost all telecommunication devices
\item theoretical analysis extremely difficult, however (like AI ;)
\end{itemize}
\end{frame}
\def\mike{\raisebox{-0.7em}{\includegraphics[height=2em]{micro.eps}}}
\def\loud#1{\raisebox{-1.1em}{\scalebox{#1}[1]{\includegraphics[height=3em]{speaker.eps}}}}
\def\head#1{\raisebox{-0.8em}{\scalebox{#1}[1]{\includegraphics[height=3em]{head.eps}}}}
\begin{frame} \frametitle{Example: adaptive echo cancellation}
\centering
\begin{dspBlocks}{1}{0.5}
\head{-1} & \rnode{B}{\mike} & & & & & & \loud{1} & \\
A & & & & & & & & \\
& \rnode{A}{\loud{-1}} & & & & & & \mike & \head{1} \\
& & & & & & & & B\\
\end{dspBlocks}
\ncline{->}{1,2}{1,8}
\ncline{->}{3,8}{3,2}
\psset{linecolor=dspColorTwo,linewidth=2pt,linestyle=dashed}
\only<2->{\ncarc[nodesep=3pt,arcangle=90,ncurv=1]{->}{A}{B}}
\end{frame}
\begin{frame} \frametitle{Example: adaptive echo cancellation}
\centering
\begin{dspBlocks}{1}{0.3}
& \rnode{B}{\mike} & \BDsub & & & & & \loud{1} & \\
$H(z)~~~~$ & & \BDfilter{$\hat{H}(z)$} & & & & & & \\
& \rnode{A}{\loud{-1}} & \BDsplit & & & & & \mike & \head{1} \\
\end{dspBlocks}
\ncline{->}{1,2}{1,3}\naput{$m_e[n]$}\ncline{->}{1,3}{1,8}\naput{$\hat{m}[n]$}
\ncline{->}{3,8}{3,2}\nbput{$s[n]$}
\ncline{->}{3,3}{2,3}\ncline{->}{2,3}{1,3}
\psset{linecolor=dspColorTwo,linewidth=2pt,linestyle=dashed}
\ncarc[nodesep=3pt,arcangle=90,ncurv=1]{->}{A}{B}
\end{frame}
\begin{frame} \frametitle{The echo-corrupted signal}
Signal captured by the microphone:
\vspace{2em}
\[
m_e[n] = \only<2>{\hlBox{t1}{green!30}}{m[n]} + \only<3>{\hlBox{t2}{green!30}}{h[n]} \ast \only<4>{\hlBox{t3}{green!30}}{s[n]}
\]
\only<2>{\hspace{2em}\rnode[lc]{T1}{speaker A's voice}
\nccurve[linecolor=green!30,angleA=0,angleB=-90]{->}{T1}{t1}}
\only<3>{\hspace{4em}\rnode[lc]{T2}{echo transfer function}
\nccurve[linecolor=green!30,angleA=0,angleB=-90]{->}{T2}{t2}}
\only<4>{\hspace{6em}\rnode[lc]{T3}{speaker B's voice}
\nccurve[linecolor=green!30,angleA=0,angleB=-90]{->}{T3}{t3}}
\only<5->{
\vspace{3em}
\centering
we need to estimate $h[n]$ in order to \textit{subtract} the unwanted echo
}
\end{frame}
\begin{frame} \frametitle{Echo cancellation as adaptive filtering}
\centering
\begin{dspBlocks}{1}{1}
$s[n]~$ & \BDfilter{$\hat{H}(z)$} & \BDsplit & \\
& & \BDsub & $~~~d[n] = h[n] \ast s[n] $ %
\psset{arrows=->}
\ncline{1,1}{1,2}\ncline{-}{1,2}{1,3}\naput{$\hat{d}[n]$}\ncline{1,3}{1,4}
\ncline{1,3}{2,3}
\ncline{2,4}{2,3}
\ncangle[angleA=180,angleB=-90,linewidth=\BDwidth]{2,3}{1,2}\naput{$e[n]$}
\end{dspBlocks}
\end{frame}
\begin{frame} \frametitle{Training the filter}
\begin{itemize}
\item ``desired'' signal is the echo (so we can subtract it)
\item normally, only one person talks at a time: when B is speaking, $m_e[n] = h[n]\ast s[n]$
\item people move, volume changes: $H(z)$ is time-varying!
\item use the LMS filter
\end{itemize}
\end{frame}
\begin{frame} \frametitle{Example: simple echo model}
\centering
\begin{dspBlocks}{1}{1}
% 1 2 3 4 5
$x[n]~~$ & \BDadd & & & \BDsplit & $~~d[n]$ \\
& & \BDfilter{$H(z)$} & \BDdelayN{M} &
\ncline{->}{1,1}{1,2}\ncline{->}{1,2}{1,6}
\ncline{->}{2,2}{1,2}\ncline{2,2}{2,3}\taput{$\alpha$}\ncline{<-}{2,3}{2,4}\ncline{->}{2,5}{2,4}
\ncline{2,5}{1,5}
\end{dspBlocks}
\vspace{2em}
\[
H(z) = (1-\lambda)/(1 - \lambda z^{-1})
\]
\end{frame}
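% A hedged NumPy sketch (not part of the original deck) of the simple echo model above,
% d[n] = x[n] + alpha * y[n] with y[n] = lambda * y[n-1] + (1 - lambda) * d[n-M]; the function
% name echo and the loop structure are illustrative. Feeding a unit impulse reproduces the
% echo impulse response plotted on the next slide.
\begin{frame}[fragile] \frametitle{Simple echo model: a numerical sketch}
{\footnotesize a minimal sketch with the parameters of the next slide ($M=100$, $\alpha=-0.8$, $\lambda=0.6$):}
\begin{verbatim}
import numpy as np

def echo(x, M=100, alpha=-0.8, lam=0.6):
    """d[n] = x[n] + alpha*y[n], with y[n] = lam*y[n-1] + (1-lam)*d[n-M]."""
    d = np.zeros(len(x))
    y = 0.0                                   # state of the leaky integrator H(z)
    for n in range(len(x)):
        y = lam * y + (1 - lam) * (d[n - M] if n >= M else 0.0)
        d[n] = x[n] + alpha * y
    return d

x = np.zeros(500); x[0] = 1.0                 # unit impulse
h_echo = echo(x)                              # cf. the impulse response on the next slide
\end{verbatim}
\end{frame}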
\begin{frame} \frametitle{Echo impulse response}
\centering
$M=100, \alpha=-0.8, \lambda=0.6$
\begin{dspPlot}[xticks=100,xout=true]{0,500}{-0.4,1.1}
\moocStyle
\dspFuncDataAt{0}{%
1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. -0.32 -0.192 -0.115 -0.069 -0.041 -0.025 -0.015 -0.009 -0.005
-0.003 -0.002 -0.001 -0.001 -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. 0.102 0.123 0.111 0.088 0.066 0.048 0.033
0.023 0.015 0.01 0.007 0.004 0.003 0.002 0.001 0.001 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. -0.033 -0.059 -0.071 -0.071 -0.064
-0.054 -0.043 -0.033 -0.025 -0.018 -0.013 -0.009 -0.006 -0.004 -0.003
-0.002 -0.001 -0.001 -0.001 -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
-0. -0. -0. -0. -0. -0. -0. -0. 0.01 0.025
0.038 0.045 0.048 0.046 0.041 0.035 0.029 0.023 0.018 0.014
0.01 0.008 0.006 0.004 0.003 0.002 0.001 0.001 0.001 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.}
\end{dspPlot}
\end{frame}
\begin{frame} \frametitle{Running the LMS adaptation}
\centering
white input, averaged MSE over 200 experiments
\begin{dspPlot}[xticks=custom,xout=true,sidegap=0,xlabel={iterations}]{0,500}{0,1.5}
\moocStyle
\dspCustomTicks[]{0 0 250 2500 500 5000}
\dspFuncDataAt{0}{%
1.086 1.414 1.378 1.23 1.195 1.248 0.985 1.318 0.989 1.073
1.057 1.022 0.92 1.017 1.032 1.086 0.968 0.981 1.059 1.121
0.959 0.908 1.021 0.967 0.902 1.02 0.817 1.077 0.891 0.909
0.807 0.862 0.836 0.93 0.973 0.977 0.895 0.698 1.014 0.786
0.753 0.897 0.81 0.821 0.804 0.881 0.755 0.791 0.862 0.669
0.735 0.891 0.732 0.807 0.724 0.638 0.647 0.668 0.741 0.6 0.537
0.691 0.64 0.673 0.601 0.581 0.621 0.673 0.592 0.567 0.511
0.489 0.554 0.468 0.514 0.491 0.489 0.466 0.479 0.564 0.542
0.459 0.529 0.525 0.497 0.458 0.422 0.534 0.457 0.462 0.507
0.411 0.399 0.414 0.367 0.376 0.415 0.372 0.351 0.353 0.411
0.373 0.404 0.364 0.354 0.358 0.34 0.368 0.36 0.298 0.359
0.347 0.27 0.261 0.293 0.284 0.307 0.289 0.297 0.297 0.266
0.238 0.293 0.285 0.3 0.272 0.272 0.233 0.276 0.294 0.231
0.227 0.245 0.226 0.214 0.229 0.223 0.238 0.234 0.249 0.21
0.207 0.198 0.202 0.205 0.239 0.228 0.202 0.231 0.219 0.185
0.203 0.203 0.187 0.17 0.186 0.161 0.174 0.18 0.172 0.162
0.166 0.173 0.176 0.155 0.139 0.17 0.156 0.16 0.17 0.146
0.14 0.15 0.155 0.148 0.119 0.117 0.154 0.146 0.143 0.124
0.147 0.128 0.133 0.112 0.125 0.136 0.13 0.115 0.123 0.123
0.109 0.094 0.119 0.116 0.121 0.121 0.11 0.108 0.11 0.112
0.114 0.11 0.089 0.106 0.125 0.113 0.109 0.109 0.095 0.081
0.084 0.111 0.099 0.082 0.086 0.105 0.089 0.088 0.101 0.088
0.071 0.092 0.074 0.082 0.077 0.078 0.083 0.077 0.089 0.073
0.073 0.091 0.075 0.077 0.061 0.071 0.079 0.063 0.083 0.074
0.065 0.073 0.066 0.064 0.059 0.068 0.069 0.071 0.067 0.076
0.064 0.065 0.076 0.064 0.064 0.066 0.061 0.055 0.065 0.062
0.07 0.065 0.067 0.071 0.059 0.062 0.06 0.061 0.056 0.046
0.061 0.054 0.052 0.059 0.056 0.056 0.058 0.044 0.054 0.056
0.055 0.053 0.047 0.05 0.053 0.05 0.044 0.043 0.051 0.052
0.047 0.054 0.045 0.042 0.049 0.046 0.043 0.047 0.05 0.051
0.043 0.044 0.052 0.042 0.044 0.041 0.041 0.04 0.038 0.043
0.037 0.039 0.04 0.033 0.041 0.039 0.048 0.039 0.042 0.035
0.036 0.047 0.034 0.033 0.036 0.041 0.042 0.033 0.044 0.04
0.035 0.04 0.041 0.032 0.042 0.033 0.032 0.036 0.038 0.036
0.031 0.034 0.036 0.037 0.038 0.04 0.029 0.032 0.038 0.042
0.029 0.036 0.032 0.032 0.038 0.037 0.036 0.031 0.029 0.037
0.031 0.03 0.029 0.034 0.026 0.033 0.026 0.028 0.026 0.038
0.03 0.039 0.029 0.033 0.031 0.028 0.029 0.024 0.026 0.032
0.029 0.033 0.029 0.034 0.031 0.028 0.033 0.028 0.024 0.028
0.027 0.029 0.026 0.036 0.027 0.025 0.031 0.027 0.03 0.026
0.028 0.028 0.025 0.034 0.027 0.029 0.028 0.029 0.029 0.026
0.027 0.029 0.029 0.026 0.026 0.026 0.022 0.026 0.03 0.026
0.028 0.027 0.028 0.024 0.029 0.024 0.023 0.028 0.03 0.027
0.026 0.026 0.03 0.026 0.028 0.027 0.023 0.028 0.027 0.026
0.025 0.029 0.028 0.032 0.027 0.026 0.022 0.024 0.029 0.024
0.023 0.029 0.027 0.027 0.027 0.023 0.025 0.026 0.025 0.025
0.025 0.026 0.026 0.025 0.026 0.022 0.022 0.023 0.022 0.025
0.022 0.025 0.023 0.025 0.025 0.021 0.021 0.022 0.02 0.024
0.023 0.025 0.023 0.024 0.032 0.023 0.02 0.019 0.019 0.023
0.023 0.025 0.027 0.025 0.03 0.024 0.018 0.024 0.025}
\end{dspPlot}
\end{frame}
\begin{frame} \frametitle{LMS can catch up with changes}
\centering
echo delay changes from $M=100$ to $M=90$ at $n=3000$
\begin{dspPlot}[xticks=custom,xout=true,sidegap=0,xlabel={iterations}]{0,500}{0,1.5}
\moocStyle
\dspCustomTicks[]{0 0 250 2500 500 5000}
\dspFuncDataAt{0}{%
1.259 1.336 1.059 1.116 1.176 1.177 1.414 1.257 1.23 1.095
1.326 1.21 1.119 0.984 1.025 0.96 1.187 0.985 1.132 1.066
0.975 1.175 0.954 1.156 0.949 1.06 0.934 0.877 0.914 0.774
0.832 0.79 0.865 1.043 0.886 0.909 0.826 0.902 0.773 0.742
0.833 0.697 0.718 0.716 0.969 0.839 0.762 0.819 0.742 0.712
0.717 0.717 0.645 0.732 0.742 0.762 0.614 0.764 0.606 0.574
0.682 0.655 0.509 0.587 0.618 0.583 0.703 0.49 0.586 0.497
0.622 0.581 0.531 0.426 0.59 0.425 0.548 0.549 0.483 0.434
0.496 0.468 0.529 0.466 0.385 0.471 0.397 0.471 0.452 0.466
0.422 0.393 0.378 0.385 0.391 0.364 0.316 0.402 0.405 0.351
0.392 0.385 0.386 0.408 0.334 0.338 0.271 0.336 0.335 0.333
0.278 0.33 0.322 0.276 0.292 0.322 0.329 0.329 0.262 0.269
0.264 0.287 0.287 0.272 0.226 0.264 0.244 0.316 0.242 0.264
0.205 0.253 0.269 0.198 0.215 0.269 0.235 0.256 0.244 0.237
0.174 0.23 0.197 0.21 0.237 0.187 0.234 0.178 0.182 0.216
0.24 0.197 0.164 0.182 0.177 0.203 0.184 0.198 0.159 0.177
0.164 0.152 0.152 0.179 0.151 0.189 0.181 0.164 0.187 0.151
0.168 0.147 0.141 0.129 0.162 0.141 0.139 0.149 0.151 0.144
0.154 0.141 0.154 0.132 0.144 0.117 0.116 0.15 0.143 0.144
0.122 0.126 0.139 0.095 0.115 0.113 0.108 0.112 0.113 0.103
0.104 0.129 0.098 0.099 0.121 0.089 0.085 0.114 0.106 0.095
0.093 0.098 0.103 0.099 0.106 0.096 0.084 0.105 0.092 0.095
0.086 0.09 0.086 0.083 0.082 0.064 0.08 0.091 0.1 0.082
0.065 0.095 0.093 0.086 0.065 0.073 0.076 0.094 0.081 0.081
0.062 0.09 0.068 0.081 0.067 0.064 0.072 0.077 0.064 0.062
0.068 0.073 0.071 0.069 0.064 0.067 0.057 0.071 0.06 0.064
0.063 0.068 0.064 0.061 0.059 0.052 0.048 0.052 0.054 0.062
0.048 0.057 0.057 0.047 0.052 0.057 0.049 0.056 0.052 0.051
0.052 0.048 0.047 0.044 0.566 0.466 0.535 0.51 0.445 0.442
0.41 0.461 0.442 0.492 0.396 0.474 0.48 0.391 0.493 0.349
0.415 0.37 0.404 0.397 0.451 0.394 0.443 0.406 0.364 0.366
0.415 0.378 0.414 0.397 0.353 0.368 0.347 0.333 0.354 0.337
0.353 0.353 0.371 0.349 0.32 0.359 0.362 0.338 0.351 0.325
0.325 0.326 0.356 0.354 0.299 0.351 0.35 0.296 0.318 0.316
0.312 0.254 0.258 0.284 0.236 0.306 0.232 0.22 0.251 0.273
0.241 0.274 0.256 0.204 0.229 0.204 0.251 0.225 0.237 0.205
0.239 0.19 0.202 0.198 0.207 0.201 0.215 0.204 0.198 0.241
0.193 0.169 0.183 0.198 0.19 0.18 0.153 0.161 0.152 0.173
0.147 0.152 0.163 0.152 0.19 0.143 0.156 0.144 0.135 0.119
0.148 0.135 0.131 0.157 0.125 0.157 0.124 0.139 0.107 0.128
0.133 0.115 0.138 0.107 0.1 0.128 0.132 0.124 0.124 0.111
0.12 0.111 0.103 0.101 0.085 0.117 0.113 0.116 0.104 0.095
0.105 0.101 0.102 0.091 0.107 0.098 0.085 0.093 0.09 0.097
0.098 0.098 0.08 0.083 0.089 0.09 0.084 0.079 0.086 0.089
0.078 0.091 0.082 0.102 0.083 0.072 0.069 0.071 0.071 0.076
0.073 0.073 0.074 0.061 0.064 0.06 0.068 0.067 0.064 0.062
0.08 0.066 0.06 0.062 0.055 0.061 0.054 0.075 0.06 0.067
0.063 0.06 0.052 0.05 0.056 0.05 0.062 0.056 0.056 0.052
0.054 0.061 0.055 0.051 0.047 0.048 0.049 0.04 0.049 0.048
0.044 0.053 0.044 0.044 0.045 0.043 0.041 0.043 0.044 0.042}
\end{dspPlot}
\end{frame}
\end{document}
