IterativeMethodsLib.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30
@author: Simone Deparis
"""
import numpy as np
def Richardson(A, b, x0, P, alpha, maxIterations, tolerance):
    # Richardson method to approximate the solution of Ax = b
    # x0 : initial guess
    # P : preconditioner
    # alpha : relaxation parameter
    # maxIterations : maximum number of iterations
    # tolerance : tolerance for the relative residual
    # Outputs :
    # xk : approximate solution
    # RelativeResidualNorm : vector of the relative norms of the residuals
    # we do not keep track of the whole sequence of iterates, only the latest one
    xk = x0
    rk = b - A.dot(xk)
    RelativeResidualNorm = []
    for k in range(maxIterations):
        # preconditioned residual: solve P zk = rk
        zk = np.linalg.solve(P, rk)
        xk = xk + alpha*zk
        rk = b - A.dot(xk)
        # you can verify that this is equivalent to
        # rk = rk - alpha*A.dot(zk)
        RelativeResidualNorm.append(np.linalg.norm(rk)/np.linalg.norm(b))
        if RelativeResidualNorm[-1] < tolerance:
            print(f'Richardson converged in {k+1} iterations with a relative residual of {RelativeResidualNorm[-1]:1.3e}')
            return xk, RelativeResidualNorm
    print(f'Richardson did not converge in {maxIterations} iterations; the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
    return xk, RelativeResidualNorm
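
# ---------------------------------------------------------------------------
# Example usage (an added sketch, not part of the original library). The test
# problem below is an illustrative choice: the SPD tridiagonal matrix of the
# 1D Poisson problem, with the Jacobi preconditioner P = diag(A).
def _testProblem(n=20):
    A = 2.0*np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    b = np.ones(n)
    x0 = np.zeros(n)
    P = np.diag(np.diag(A))   # Jacobi preconditioner
    return A, b, x0, P

# With P = diag(A) and alpha = 1, Richardson reduces to the Jacobi method,
# which converges for this matrix (its iteration matrix has spectral radius
# cos(pi/(n+1)) < 1), although slowly:
#   A, b, x0, P = _testProblem()
#   x, res = Richardson(A, b, x0, P, alpha=1.0, maxIterations=5000, tolerance=1e-8)
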
def PrecGradient(A, b, x0, P, maxIterations, tolerance):
    # Preconditioned gradient method to approximate the solution of Ax = b
    # x0 : initial guess
    # P : preconditioner
    # maxIterations : maximum number of iterations
    # tolerance : tolerance for the relative residual
    # Outputs :
    # xk : approximate solution
    # RelativeResidualNorm : vector of the relative norms of the residuals
    # we do not keep track of the whole sequence of iterates, only the latest one
    xk = x0
    rk = b - A.dot(xk)
    RelativeResidualNorm = []
    for k in range(maxIterations):
        # preconditioned residual (descent direction): solve P zk = rk
        zk = np.linalg.solve(P, rk)
        Azk = A.dot(zk)
        # optimal step length along zk
        alphak = zk.dot(rk) / zk.dot(Azk)
        xk = xk + alphak*zk
        rk = b - A.dot(xk)
        # you can verify that this is equivalent to
        # rk = rk - alphak*Azk
        RelativeResidualNorm.append(np.linalg.norm(rk)/np.linalg.norm(b))
        if RelativeResidualNorm[-1] < tolerance:
            print(f'Gradient converged in {k+1} iterations with a relative residual of {RelativeResidualNorm[-1]:1.3e}')
            return xk, RelativeResidualNorm
    print(f'Gradient did not converge in {maxIterations} iterations; the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
    return xk, RelativeResidualNorm
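
# Example usage (an added sketch): on the same test problem, the gradient
# method picks at every iteration the step alphak = (zk.rk)/(zk.Azk), which
# minimizes the energy norm of the error along the direction zk, so it is at
# least as good as Richardson with the best possible fixed alpha:
#   A, b, x0, P = _testProblem()
#   x, res = PrecGradient(A, b, x0, P, maxIterations=5000, tolerance=1e-8)
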
def PrecConjugateGradient(A, b, x0, P, maxIterations, tolerance):
    # Preconditioned conjugate gradient method to approximate the solution of Ax = b
    # x0 : initial guess
    # P : preconditioner
    # maxIterations : maximum number of iterations
    # tolerance : tolerance for the relative residual
    # Outputs :
    # xk : approximate solution
    # RelativeResidualNorm : vector of the relative norms of the residuals
    # we do not keep track of the whole sequence of iterates, only the latest one
    xk = x0
    rk = b - A.dot(xk)
    zk = np.linalg.solve(P, rk)
    pk = zk
    RelativeResidualNorm = []
    for k in range(maxIterations):
        Apk = A.dot(pk)
        # optimal step length along the search direction pk
        alphak = pk.dot(rk) / pk.dot(Apk)
        xk = xk + alphak*pk
        rk = rk - alphak*Apk
        # you can verify that this is equivalent to
        # rk = b - A.dot(xk)
        zk = np.linalg.solve(P, rk)
        # make the new search direction A-orthogonal to the previous one
        betak = Apk.dot(zk) / pk.dot(Apk)
        pk = zk - betak*pk
        RelativeResidualNorm.append(np.linalg.norm(rk)/np.linalg.norm(b))
        if RelativeResidualNorm[-1] < tolerance:
            print(f'Conjugate Gradient converged in {k+1} iterations with a relative residual of {RelativeResidualNorm[-1]:1.3e}')
            return xk, RelativeResidualNorm
    print(f'Conjugate Gradient did not converge in {maxIterations} iterations; the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
    return xk, RelativeResidualNorm
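
# Small self-test (an added sketch, illustrative only): running the three
# methods on the same Jacobi-preconditioned Poisson problem makes the expected
# ranking visible, with conjugate gradient needing by far the fewest iterations.
if __name__ == "__main__":
    A, b, x0, P = _testProblem()
    Richardson(A, b, x0, P, alpha=1.0, maxIterations=5000, tolerance=1e-8)
    PrecGradient(A, b, x0, P, maxIterations=5000, tolerance=1e-8)
    PrecConjugateGradient(A, b, x0, P, maxIterations=5000, tolerance=1e-8)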
