diff --git a/4.2 Methodes iteratives.ipynb b/4.2 Methodes iteratives.ipynb
new file mode 100644
index 0000000..542118d
--- /dev/null
+++ b/4.2 Methodes iteratives.ipynb
@@ -0,0 +1,798 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 4.2 Iterative methods"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# importing the libraries used in this notebook\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# scipy.linalg.eig : eigenvalues of a matrix\n",
+    "from scipy.linalg import eig\n",
+    "# scipy.linalg.lu : LU decomposition\n",
+    "from scipy.linalg import lu\n",
+    "# scipy.linalg.cholesky : Cholesky decomposition\n",
+    "from scipy.linalg import cholesky\n",
+    "# scipy.linalg.hilbert : Hilbert matrix\n",
+    "from scipy.linalg import hilbert\n",
+    "\n",
+    "np.set_printoptions(precision=4, suppress=True, linewidth=120)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Richardson method\n",
+    "\n",
+    "Given $A$ and $\mathbf b$, we look for an approximation of the solution $\mathbf x$ of $A\mathbf x = \mathbf b$.\n",
+    "\n",
+    "Let $\mathbf x^{(0)}$ be given and $\mathbf r^{(0)}= \mathbf b - A\mathbf x^{(0)}$; for $k=0,1,2,...$ :\n",
+    "\n",
+    "\begin{eqnarray*}\n",
+    "\text{find }\mathbf{z}^{(k)} \text{ such that } && P \mathbf{z}^{(k)} = \mathbf{r}^{(k)}\\\n",
+    "\text{choose }\alpha_k && \\\n",
+    "&& \mathbf{x}^{(k+1)} = \mathbf{x}^{(k)} + \alpha_k \mathbf{z}^{(k)} \\\n",
+    "&& \mathbf{r}^{(k+1)} = \mathbf{r}^{(k)} - \alpha_k A \mathbf{z}^{(k)}.\n",
+    "\end{eqnarray*}\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Example 1\n",
+    "We consider the following matrix $A$ and right-hand side $\mathbf b$\n",
+    "\n",
+    "$$A=\begin{pmatrix}\n",
+    "1 & 2 & 3 & 4 \\\n",
+    "5 & 6 & 7 & 8\\\n",
+    "9 & 10 & 11 & 12\\\n",
+    "13 & 14 & 15 & 16\n",
+    "\end{pmatrix}\n",
+    "\quad\n",
+    "\mathbf b=\begin{pmatrix}\n",
+    "1 \\ -1 \\ 1 \\ -1\n",
+    "\end{pmatrix}.$$"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### The Jacobi method"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The diagonal of $A$ is\n",
+    "$$D=\begin{pmatrix}\n",
+    "1 & 0 & 0 & 0 \\\n",
+    "0 & 6 & 0 & 0\\\n",
+    "0 & 0 & 11 & 0\\\n",
+    "0 & 0 & 0 & 16\n",
+    "\end{pmatrix};$$\n",
+    "take $\alpha=1$ and $P=D$.\n",
+    "In this case the Richardson method corresponds to the Jacobi method, and one iteration reads as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "A = np.array([[1,2,3,4],\n",
+    "              [5,6,7,8],\n",
+    "              [9,10,11,12],\n",
+    "              [13,14,15,16]])\n",
+    "b = np.array([1,-1,1,-1])\n",
+    "\n",
+    "# Diagonal of A\n",
+    "P = np.diag(np.diag(A))\n",
+    "alpha = 1\n",
+    "\n",
+    "# Initialization, for instance\n",
+    "k = 0\n",
+    "xk = np.array([1,0,0,0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# One Richardson iteration\n",
+    "\n",
+    "# residual\n",
+    "rk = b - A.dot(xk)\n",
+    "# preconditioned residual : solve P zk = rk\n",
+    "zk = np.linalg.solve(P, rk)\n",
+    "\n",
+    "# solution update, x(k+1)\n",
+    "xk1 = xk + alpha*zk\n",
+    "\n",
+    "# prepare the next iteration\n",
+    "xk = xk1\n",
+    "print(xk)"
+   ]
+  },
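+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see how the iterates behave, we can repeat this step several times. The loop below is a minimal sketch (it assumes the variables `A`, `b`, `P` and `alpha` defined above); it restarts from $\mathbf x^{(0)}$ and prints the norm of the residual at each step."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: repeat the Jacobi-preconditioned Richardson iteration a few times\n",
+    "# and watch the norm of the residual\n",
+    "xk = np.array([1,0,0,0])\n",
+    "for k in range(8) :\n",
+    "    rk = b - A.dot(xk)\n",
+    "    zk = np.linalg.solve(P, rk)\n",
+    "    xk = xk + alpha*zk\n",
+    "    print(f'k = {k} : ||r|| = {np.linalg.norm(rk):.2e}')"
+   ]
+  },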
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we execute these iterations several times, the method does not converge. Why?\n",
+    "\n",
+    "In this case, the iteration matrix of the Richardson method is equal to\n",
+    "$$B = D ^{-1}(D - A) = I - D^{-1}A =\n",
+    "\begin{pmatrix}\n",
+    "0 & -2 & -3 & -4 \\\n",
+    "-5/6 & 0 & -7/6 & -4/3\\\n",
+    "-9/11 & -10/11 & 0 & -12/11\\\n",
+    "-13/16 & -14/16 & -15/16 & 0\n",
+    "\end{pmatrix}.$$"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Jacobi\n",
+    "# the first diag extracts the diagonal of A, the second one builds a diagonal matrix from a vector\n",
+    "D = np.diag(np.diag(A))\n",
+    "\n",
+    "# efficiently computing the Jacobi iteration matrix without explicitly computing the inverse of D\n",
+    "Bj = np.linalg.solve(D,(D-A))\n",
+    "\n",
+    "# What are the eigenvalues of Bj ?\n",
+    "lk, v = eig(Bj)\n",
+    "print(f'The eigenvalues of Bj are {lk}\n')\n",
+    "print(f'The spectral radius of Bj is {np.max(np.abs(lk)):.2f} > 1, therefore the Jacobi method does not converge')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### The Gauss-Seidel method\n",
+    "\n",
+    "Take $\alpha=1$ and let $P$ be the lower triangular part of $A$, diagonal included.\n",
+    "In this case the Richardson method corresponds to the Gauss-Seidel method and reads as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Gauss-Seidel\n",
+    "# np.tril returns a copy of an array with the elements above the k-th diagonal set to zero\n",
+    "Pgs = np.tril(A, k=0)\n",
+    "\n",
+    "# efficiently computing the Gauss-Seidel iteration matrix without explicitly computing the inverse of Pgs\n",
+    "Bgs = np.linalg.solve(Pgs,(Pgs-A))\n",
+    "\n",
+    "# What are the eigenvalues of Bgs ?\n",
+    "lk, v = eig(Bgs)\n",
+    "print(f'The eigenvalues of Bgs are {lk}\n')\n",
+    "print(f'The spectral radius of Bgs is {np.max(np.abs(lk)):.2f} > 1, therefore the Gauss-Seidel method does not converge')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The Gauss-Seidel method applied to this matrix does not converge either."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Exercise\n",
+    "Redo these two methods with the matrix\n",
+    "\n",
+    "$$A=\begin{pmatrix}\n",
+    "3 & 2 & 1 & 0 \\\n",
+    "3 & 4 & 3 & 2\\\n",
+    "3 & 2 & 5 & 2\\\n",
+    "1 & 2 & 3 & 4\n",
+    "\end{pmatrix}.$$\n",
+    "```python\n",
+    "A = np.array([[3,2,1,0],[3,4,3,2],[3,2,5,2],[1,2,3,4]])\n",
+    "```\n",
+    "\n",
+    "1. Check whether Jacobi and Gauss-Seidel converge, using the iteration matrix.\n",
+    "2. Compute the first 10 iterations of these methods.\n",
+    "\n",
+    "A possible starting point is sketched below."
+   ]
+  },
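+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch for this exercise. The exercise does not specify a right-hand side, so the vector $\mathbf b$ of Example 1 is kept here by assumption; any $\mathbf b$ would do for point 2."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: spectral radii and first 10 iterations for the exercise matrix\n",
+    "A = np.array([[3,2,1,0],[3,4,3,2],[3,2,5,2],[1,2,3,4]])\n",
+    "b = np.array([1,-1,1,-1])  # assumed right-hand side, kept from Example 1\n",
+    "\n",
+    "for name, P in [('Jacobi', np.diag(np.diag(A))), ('Gauss-Seidel', np.tril(A, k=0))] :\n",
+    "    # 1. spectral radius of the iteration matrix B = I - P^{-1}A\n",
+    "    lk, v = eig(np.linalg.solve(P,(P-A)))\n",
+    "    print(f'{name} : spectral radius = {np.max(np.abs(lk)):.2f}')\n",
+    "\n",
+    "    # 2. first 10 iterations with alpha = 1\n",
+    "    xk = np.zeros(4)\n",
+    "    for k in range(10) :\n",
+    "        xk = xk + np.linalg.solve(P, b - A.dot(xk))\n",
+    "    print(f'{name} : x(10) = {xk}')"
+   ]
+  },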
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 2\n",
+    "\n",
+    "Consider the following matrix $A$ and vector $\mathbf b$\n",
+    "\n",
+    "$$A = \left(\n",
+    "  \begin{array}{ccc}\n",
+    "  3 & 2 & 1 \\[0.5em]\n",
+    "  1 & 4 & 1 \\[0.5em]\n",
+    "  2 & 4 & 8\n",
+    "  \end{array} \right), \quad\n",
+    "  \mathbf{b} = \left( \begin{array}{c} 4 \\ 5 \\ 6 \end{array}\n",
+    "\right) \, .$$\n",
+    "\n",
+    "Write down (without computing them) $P$, $B$, and $\rho(B)$ for the Jacobi and Gauss-Seidel methods.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "A = np.array([[3, 2, 1],\n",
+    "              [1, 4, 1],\n",
+    "              [2, 4, 8]])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Jacobi\n",
+    "\n",
+    "# To complete\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Gauss-Seidel\n",
+    "\n",
+    "# To complete\n"
+   ]
+  },
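+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once $P$, $B$ and $\rho(B)$ have been written down by hand, the short sketch below (reusing the matrix `A` just defined) can serve as a numerical check of the two spectral radii."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: numerical check of the spectral radii for Example 2\n",
+    "for name, P in [('Jacobi', np.diag(np.diag(A))), ('Gauss-Seidel', np.tril(A, k=0))] :\n",
+    "    lk, v = eig(np.linalg.solve(P,(P-A)))\n",
+    "    print(f'{name} : rho(B) = {np.max(np.abs(lk)):.4f}')"
+   ]
+  },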
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Richardson method (recap)\n",
+    "\n",
+    "Given $A$ and $\mathbf b$, we look for an approximation of the solution $\mathbf x$ of $A\mathbf x = \mathbf b$.\n",
+    "\n",
+    "Let $\mathbf x^{(0)}$ be given and $\mathbf r^{(0)}= \mathbf b - A\mathbf x^{(0)}$; for $k=0,1,2,...$ :\n",
+    "\n",
+    "\begin{eqnarray*}\n",
+    "\text{find }\mathbf{z}^{(k)} \text{ such that } && P \mathbf{z}^{(k)} = \mathbf{r}^{(k)}\\\n",
+    "\text{choose }\alpha_k && \\\n",
+    "&& \mathbf{x}^{(k+1)} = \mathbf{x}^{(k)} + \alpha_k \mathbf{z}^{(k)} \\\n",
+    "&& \mathbf{r}^{(k+1)} = \mathbf{r}^{(k)} - \alpha_k A \mathbf{z}^{(k)}.\n",
+    "\end{eqnarray*}\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 3 - Jacobi and Gauss-Seidel with relaxation\n",
+    "\n",
+    "We have seen that the Jacobi method applied to the matrix\n",
+    "$$A=\begin{pmatrix}\n",
+    "3 & 2 & 1 & 0\\\n",
+    "3 & 4 & 3 & 2\\\n",
+    "3 & 2 & 5 & 2\\\n",
+    "1 & 2 & 3 & 4\n",
+    "\end{pmatrix}$$\n",
+    "does not converge, whereas Gauss-Seidel does.\n",
+    "\n",
+    "We are going to use the stationary Richardson method with a constant relaxation parameter $\alpha$ to see whether the convergence can be improved."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 1 - Iteration matrices\n",
+    "\n",
+    "The iteration matrix of the Richardson method is $B(\alpha_k) = I - \alpha_kP^{-1}A$. In particular, for a constant $\alpha$, we can plot the spectral radius $\rho(B(\alpha))$ when the preconditioner equals the diagonal of $A$ (Jacobi-like) or the lower triangular part of $A$ (Gauss-Seidel-like):\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here is how to see that $B(\alpha_k) = I - \alpha_kP^{-1}A$ :\n",
+    "\n",
+    "for preconditioned Richardson we have\n",
+    "\n",
+    "$$Px^{(k+1)} = Px^{(k)} + \alpha_k (b - Ax^{(k)})$$\n",
+    "$$Px^{(k+1)} = \alpha_k b + (P - \alpha_k A)x^{(k)}$$\n",
+    "$$x^{(k+1)} = \alpha_k P^{-1}b + P^{-1}(P - \alpha_k A)x^{(k)},$$\n",
+    "\n",
+    "and we recognize the iteration matrix $B(\alpha_k) = P^{-1}(P - \alpha_k A) = I - \alpha_kP^{-1} A$.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "A = np.array([[3,2,1,0],\n",
+    "              [3,4,3,2],\n",
+    "              [3,2,5,2],\n",
+    "              [1,2,3,4]])\n",
+    "D = np.diag(np.diag(A))\n",
+    "Pgs = np.tril(A, k=0)\n",
+    "\n",
+    "Alphas = np.linspace(0, 2, 3001)\n",
+    "\n",
+    "rhoBj = []\n",
+    "rhoBgs = []\n",
+    "\n",
+    "for alpha in Alphas :\n",
+    "    Bgs = np.linalg.solve(Pgs,(Pgs-alpha*A))\n",
+    "    Bj = np.linalg.solve(D,(D-alpha*A))\n",
+    "\n",
+    "    lk, v = eig(Bj)\n",
+    "    rhoBj.append( np.max(np.abs(lk)) )\n",
+    "\n",
+    "    lk, v = eig(Bgs)\n",
+    "    rhoBgs.append( np.max(np.abs(lk)) )\n",
+    "\n",
+    "plt.plot(Alphas, rhoBj, 'b:', label=r'$\rho(B_J(\alpha))$')\n",
+    "plt.plot(Alphas, rhoBgs, 'g:', label=r'$\rho(B_{GS}(\alpha))$')\n",
+    "\n",
+    "plt.xlabel(r'$\alpha$'); plt.ylabel(r'$\rho$')\n",
+    "plt.title('Spectral radius')\n",
+    "plt.grid(True)\n",
+    "plt.legend()\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We observe that:\n",
+    "\n",
+    "1. with $P=D$, Richardson converges for $\alpha\lessapprox 0.8$, and the optimal parameter is $\alpha \approx 0.75$;\n",
+    "2. with $P$ equal to the lower triangular part of $A$, Richardson converges for $\alpha\lessapprox 2$, and the optimal parameter is $\alpha \approx 1.5$ (both values are cross-checked below)."
+   ]
+  },
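+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal numerical cross-check of these two optimal parameters, reading them directly off the arrays `Alphas`, `rhoBj` and `rhoBgs` computed above:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: locate the relaxation parameter minimizing the spectral radius\n",
+    "print(f'P = D       : alpha_opt = {Alphas[np.argmin(rhoBj)]:.3f}, rho_min = {np.min(rhoBj):.3f}')\n",
+    "print(f'P = tril(A) : alpha_opt = {Alphas[np.argmin(rhoBgs)]:.3f}, rho_min = {np.min(rhoBgs):.3f}')"
+   ]
+  },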
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 2 - Implementation of the Richardson method\n",
+    "\n",
+    "Define a Python function that implements the stationary Richardson method, with the following structure:\n",
+    "```python\n",
+    "def Richardson(A, b, x0, P, alpha, maxIterations, tolerance) :\n",
+    "    # Stationary Richardson method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # alpha : constant relaxation parameter\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUT :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # rk : vector of relative norms of the residuals\n",
+    "```\n",
+    "\n",
+    "Like every iterative method, the Richardson method needs a stopping criterion. Here, the simplest choice is the following pair of criteria:\n",
+    "\n",
+    "1. the number of iterations reaches a fixed maximum;\n",
+    "2. the relative residual is smaller than a tolerance $\epsilon$ :\n",
+    "   $$ \frac{\| \mathbf b - A \mathbf x^{(k)} \|}{\|\mathbf b \|} < \epsilon$$\n",
+    "\n",
+    "We stop as soon as one of these criteria is met.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def Richardson(A, b, x0, P, alpha, maxIterations, tolerance) :\n",
+    "    # Stationary Richardson method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # alpha : constant relaxation parameter\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUT :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # relRes : vector of relative norms of the residuals\n",
+    "\n",
+    "    # we do not keep track of the whole sequence, just the current iterate\n",
+    "    xk = x0\n",
+    "    rk = b - A.dot(xk)\n",
+    "    bNorm = np.linalg.norm(b)\n",
+    "    relRes = [np.linalg.norm(rk)/bNorm]\n",
+    "\n",
+    "    for k in range(maxIterations) :\n",
+    "        if relRes[-1] < tolerance :\n",
+    "            break\n",
+    "        # preconditioned residual : solve P zk = rk\n",
+    "        zk = np.linalg.solve(P, rk)\n",
+    "        # update the solution and the residual\n",
+    "        xk = xk + alpha*zk\n",
+    "        rk = rk - alpha*A.dot(zk)\n",
+    "        relRes.append(np.linalg.norm(rk)/bNorm)\n",
+    "\n",
+    "    return xk, relRes"
+   ]
+  },
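+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick sanity check of the implementation (a sketch, reusing the matrix `A` of Example 3): with $P=A$ and $\alpha=1$, one iteration gives $\mathbf x^{(1)} = \mathbf x^{(0)} + A^{-1}(\mathbf b - A\mathbf x^{(0)}) = A^{-1}\mathbf b$, so the method must converge in a single step."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: with P = A and alpha = 1, Richardson converges in one iteration\n",
+    "b = np.array([0,1,-1,1])\n",
+    "x, relRes = Richardson(A, b, 0*b, A, 1, 10, 1e-12)\n",
+    "print(relRes)\n",
+    "print('difference w.r.t. np.linalg.solve :', np.linalg.norm(x - np.linalg.solve(A, b)))"
+   ]
+  },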
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 3 - Using the Richardson method\n",
+    "\n",
+    "Use the function `Richardson` to approximate the solution of $A\mathbf x = \mathbf b$ for $\mathbf b = (0, 1, -1, 1)^T$, with a tolerance of $10^{-6}$ on the relative residual, the zero vector as initial guess, and a maximum of $200$ iterations:\n",
+    "\n",
+    "1. using $P=D$, with $\alpha = 0.7, 0.75, 0.79, 0.81, 0.9$;\n",
+    "2. using $P$ equal to the lower triangular part of $A$, with $\alpha = 1, 1.5, 1.95, 2.05$.\n",
+    "\n",
+    "How do you interpret these results?"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "b = np.array([0,1,-1,1])\n",
+    "# Define the initial guess\n",
+    "x0 = 0*b  # trick to get the right size for x0\n",
+    "\n",
+    "tolerance = 1e-6\n",
+    "maxIter = 200"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Jacobi-like preconditioner\n",
+    "P = np.diag(np.diag(A))\n",
+    "legend = []\n",
+    "\n",
+    "for alpha in [0.7, 0.75, 0.79, 0.81, 0.9] :\n",
+    "    x, relRes = Richardson(A, b, x0, P, alpha, maxIter, tolerance)\n",
+    "    print(relRes[-1])\n",
+    "    plt.plot(relRes, ':')\n",
+    "    legend.append(str(alpha))\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Gauss-Seidel-like preconditioner\n",
+    "P = np.tril(A, k=0)\n",
+    "legend = []\n",
+    "\n",
+    "for alpha in [1, 1.5, 1.95, 2.05] :\n",
+    "    x, relRes = Richardson(A, b, x0, P, alpha, maxIter, tolerance)\n",
+    "    print(relRes[-1])\n",
+    "    plt.plot(relRes, ':')\n",
+    "    legend.append(str(alpha))\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 4 - Implementation of the preconditioned gradient method\n",
+    "\n",
+    "Define a Python function that implements the preconditioned gradient method and has the structure\n",
+    "```python\n",
+    "def PrecGradient(A, b, x0, P, maxIterations, tolerance) :\n",
+    "    # Preconditioned Gradient method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUTS :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # rk : vector of relative norms of the residuals\n",
+    "```\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def PrecGradient(A, b, x0, P, maxIterations, tolerance) :\n",
+    "    # Preconditioned Gradient method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUTS :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # relRes : vector of relative norms of the residuals\n",
+    "\n",
+    "    # same skeleton as Richardson; the only change is that the step\n",
+    "    # alpha_k = (zk, rk)/(zk, A zk) is recomputed at every iteration\n",
+    "    xk = x0\n",
+    "    rk = b - A.dot(xk)\n",
+    "    bNorm = np.linalg.norm(b)\n",
+    "    relRes = [np.linalg.norm(rk)/bNorm]\n",
+    "\n",
+    "    for k in range(maxIterations) :\n",
+    "        if relRes[-1] < tolerance :\n",
+    "            break\n",
+    "        zk = np.linalg.solve(P, rk)\n",
+    "        Azk = A.dot(zk)\n",
+    "        alphak = zk.dot(rk) / zk.dot(Azk)\n",
+    "        xk = xk + alphak*zk\n",
+    "        rk = rk - alphak*Azk\n",
+    "        relRes.append(np.linalg.norm(rk)/bNorm)\n",
+    "\n",
+    "    return xk, relRes"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 5 - Implementation of the preconditioned conjugate gradient method\n",
+    "\n",
+    "Define a Python function that implements the preconditioned conjugate gradient method, with the following structure:\n",
+    "```python\n",
+    "def PrecConjugateGradient(A, b, x0, P, maxIterations, tolerance) :\n",
+    "    # Preconditioned Conjugate Gradient method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUTS :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # rk : vector of relative norms of the residuals\n",
+    "```\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def PrecConjugateGradient(A, b, x0, P, maxIterations, tolerance) :\n",
+    "    # Preconditioned Conjugate Gradient method to approximate the solution of Ax=b\n",
+    "    # INPUT :\n",
+    "    # x0 : initial guess\n",
+    "    # P : preconditioner\n",
+    "    # maxIterations : maximum number of iterations\n",
+    "    # tolerance : tolerance for relative residual\n",
+    "    # OUTPUTS :\n",
+    "    # xk : approximate solution to the linear system\n",
+    "    # relRes : vector of relative norms of the residuals\n",
+    "\n",
+    "    # we do not keep track of the whole sequence, just the last two entries\n",
+    "    xk = x0\n",
+    "    rk = b - A.dot(xk)\n",
+    "    zk = np.linalg.solve(P, rk)\n",
+    "    pk = zk\n",
+    "    bNorm = np.linalg.norm(b)\n",
+    "    relRes = [np.linalg.norm(rk)/bNorm]\n",
+    "\n",
+    "    for k in range(maxIterations) :\n",
+    "        if relRes[-1] < tolerance :\n",
+    "            break\n",
+    "        Apk = A.dot(pk)\n",
+    "        alphak = pk.dot(rk) / pk.dot(Apk)\n",
+    "        xk = xk + alphak*pk\n",
+    "        rk = rk - alphak*Apk\n",
+    "        zk = np.linalg.solve(P, rk)\n",
+    "        # new direction, A-conjugate to the previous one\n",
+    "        betak = Apk.dot(zk) / Apk.dot(pk)\n",
+    "        pk = zk - betak*pk\n",
+    "        relRes.append(np.linalg.norm(rk)/bNorm)\n",
+    "\n",
+    "    return xk, relRes"
+   ]
+  },
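+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The conjugate gradient method is only guaranteed to converge for symmetric positive definite matrices (in exact arithmetic it then terminates in at most $n$ iterations). Before the experiments below, here is a minimal sketch that validates the implementation on a small symmetric positive definite system, with the trivial preconditioner $P=I$:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: validate PrecConjugateGradient on a small SPD system with P = I\n",
+    "Aspd = np.array([[2., 1.], [1., 3.]])\n",
+    "bspd = np.array([1., 0.])\n",
+    "x, relRes = PrecConjugateGradient(Aspd, bspd, 0*bspd, np.eye(2), 10, 1e-12)\n",
+    "print('approximate solution :', x)\n",
+    "print('np.linalg.solve      :', np.linalg.solve(Aspd, bspd))\n",
+    "print('iterations           :', len(relRes)-1)"
+   ]
+  },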
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Part 6 - Using the preconditioned gradient and preconditioned conjugate gradient methods\n",
+    "\n",
+    "Use the functions `PrecGradient` and `PrecConjugateGradient` to approximate the solution of $A\mathbf x = \mathbf b$ for $\mathbf b = (0, 1, -1, 1)^T$, with a tolerance of $10^{-6}$ on the relative residual, the zero vector as initial guess, and a maximum of $200$ iterations:\n",
+    "\n",
+    "1. using $P=D$;\n",
+    "2. using $P$ equal to the lower triangular part of $A$.\n",
+    "\n",
+    "How do you interpret these results?"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "legend = []\n",
+    "\n",
+    "# 1) Jacobi-like preconditioner\n",
+    "P = np.diag(np.diag(A))\n",
+    "\n",
+    "x, relRes = PrecGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "print(relRes[-1])\n",
+    "plt.plot(relRes, ':')\n",
+    "legend.append('Gradient, P=D')\n",
+    "\n",
+    "x, relRes = PrecConjugateGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "print(relRes[-1])\n",
+    "plt.plot(relRes, ':')\n",
+    "legend.append('Conj Gradient, P=D')\n",
+    "\n",
+    "# 2) Gauss-Seidel-like preconditioner\n",
+    "P = np.tril(A, k=0)\n",
+    "\n",
+    "x, relRes = PrecGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "print(relRes[-1])\n",
+    "plt.plot(relRes, ':')\n",
+    "legend.append('Gradient, lower A')\n",
+    "\n",
+    "x, relRes = PrecConjugateGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "print(relRes[-1])\n",
+    "plt.plot(relRes, ':')\n",
+    "legend.append('Conj Gradient, lower A')\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Further examples\n",
+    "\n",
+    "Repeat the numerical experiments above to approximate the solutions of\n",
+    "\n",
+    "$$\left(\n",
+    "  \begin{array}{cc}\n",
+    "  2 & 1 \\[0.5em]\n",
+    "  1 & 3\n",
+    "  \end{array} \right)\n",
+    "  \mathbf x =\n",
+    "  \left( \begin{array}{c} 1 \\ 0 \end{array} \right) \quad,\quad\n",
+    "  \left(\n",
+    "  \begin{array}{cc}\n",
+    "  2 & 2 \\[0.5em]\n",
+    "  -1 & 3\n",
+    "  \end{array} \right)\n",
+    "  \mathbf x =\n",
+    "  \left( \begin{array}{c} 1 \\ 0 \end{array} \right) \quad\text{and}\quad\n",
+    "  \left(\n",
+    "  \begin{array}{cc}\n",
+    "  5 & 7 \\[0.5em]\n",
+    "  7 & 10\n",
+    "  \end{array} \right)\n",
+    "  \mathbf x =\n",
+    "  \left( \begin{array}{c} 1 \\ 0 \end{array} \right) .$$\n",
+    "\n",
+    "For which matrices and methods do you expect convergence? For the last matrix the result is not the expected one; why? (A sketch for the first system follows the next cell.)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "b = np.array([1,0])\n",
+    "# Define the initial guess\n",
+    "x0 = np.array([1,1])\n",
+    "\n",
+    "tolerance = 1e-6\n",
+    "maxIter = 10\n",
+    "A = np.array([[2,1],[1,3]])\n",
+    "# A = np.array([[2,2],[-1,3]])\n",
+    "# A = np.array([[5,7],[7,10]])\n",
+    "\n",
+    "legend = []\n",
+    "\n",
+    "# To complete: similarly to the previous example:\n",
+    "# 1) Jacobi, Gauss-Seidel (simple)\n",
+    "# 2) PrecGradient and PrecConjugateGradient, with\n",
+    "#    both Jacobi-like and Gauss-Seidel-like preconditioning\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },
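+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a starting point, here is a sketch for the first system only; its matrix is symmetric positive definite, so all the methods are expected to converge. The other two systems can be tested by switching the definition of `A` in the cell above and rerunning."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: the six method/preconditioner combinations on the first (SPD) system\n",
+    "A = np.array([[2,1],[1,3]])\n",
+    "legend = []\n",
+    "\n",
+    "for P, tag in [(np.diag(np.diag(A)), 'P=D'), (np.tril(A, k=0), 'P=lower A')] :\n",
+    "    # Richardson with alpha = 1 (Jacobi / Gauss-Seidel)\n",
+    "    x, relRes = Richardson(A, b, x0, P, 1, maxIter, tolerance)\n",
+    "    plt.plot(relRes, ':')\n",
+    "    legend.append('Richardson, ' + tag)\n",
+    "\n",
+    "    x, relRes = PrecGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "    plt.plot(relRes, ':')\n",
+    "    legend.append('Gradient, ' + tag)\n",
+    "\n",
+    "    x, relRes = PrecConjugateGradient(A, b, x0, P, maxIter, tolerance)\n",
+    "    plt.plot(relRes, ':')\n",
+    "    legend.append('Conj Gradient, ' + tag)\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },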
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### The Hilbert matrix\n",
+    "\n",
+    "Can a Richardson-type method be used to get around the ill-conditioning of the Hilbert matrix?"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n = 10\n",
+    "A = hilbert(n)\n",
+    "xexact = np.ones(n)\n",
+    "b = A.dot(xexact)\n",
+    "\n",
+    "# Define the initial guess\n",
+    "x0 = b\n",
+    "\n",
+    "tolerance = 1e-6\n",
+    "maxIter = 10\n",
+    "\n",
+    "legend = []\n",
+    "\n",
+    "# To complete: similarly to the previous example:\n",
+    "# 1) Jacobi, Gauss-Seidel (simple)\n",
+    "# 2) PrecGradient and PrecConjugateGradient, with\n",
+    "#    both Jacobi-like and Gauss-Seidel-like preconditioning\n",
+    "\n",
+    "plt.legend(legend)\n",
+    "\n",
+    "plt.xlabel('n'); plt.ylabel('res')\n",
+    "plt.title('Relative residuals')\n",
+    "plt.yscale('log')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.16"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}