diff --git a/IntegrationLib.py b/IntegrationLib.py
new file mode 100644
index 0000000..ced7a7f
--- /dev/null
+++ b/IntegrationLib.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Jan 30
+
+@author: Simone Deparis
+"""
+
+import numpy as np
+
+# This function provides the midpoint quadrature rule.
+def Midpoint( a, b, N, f ) :
+    # [a,b] : interval
+    # N : number of subintervals
+    # f : function to integrate using the midpoint rule
+
+    # size of the subintervals
+    H = (b - a) / N
+
+    # quadrature nodes (midpoint of each subinterval)
+    x = np.linspace(a+H/2, b-H/2, N)
+
+    # approximate integral
+    return H * np.sum( f(x) )
+
+
+# This function provides the trapezoidal quadrature rule.
+def Trapezoidal( a, b, N, f ) :
+    # [a,b] : interval
+    # N : number of subintervals
+    # f : function to integrate using the trapezoidal rule
+
+    # size of the subintervals
+    H = (b - a) / N
+
+    # quadrature nodes
+    x = np.linspace(a, b, N+1)
+
+    # approximate integral
+    return H/2 * ( f(a) + f(b) ) + H * np.sum( f(x[1:N]) )
+
+# This function provides the Simpson quadrature rule.
+def Simpson( a, b, N, f ) :
+    # [a,b] : interval
+    # N : number of subintervals
+    # f : function to integrate using Simpson's rule
+
+    # size of the subintervals
+    H = (b - a) / N
+    # points defining the subintervals
+    x = np.linspace(a, b, N+1)
+
+    # left quadrature nodes
+    xl = x[0:-1]
+    # right quadrature nodes
+    xr = x[1:]
+    # middle quadrature nodes
+    xm = (xl+xr)/2
+
+    # approximate integral
+    return H/6 * np.sum( f(xl) + 4*f(xm) + f(xr) )
diff --git a/IterativeMethodsLib.py b/IterativeMethodsLib.py
new file mode 100644
index 0000000..0bad723
--- /dev/null
+++ b/IterativeMethodsLib.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Jan 30
+
+@author: Simone Deparis
+"""
+
+import numpy as np
+
+def Richardson(A, b, x0, P, alpha, maxIterations, tolerance) :
+    # Richardson method to approximate the solution of Ax=b
+    # x0 : initial guess
+    # P : preconditioner
+    # alpha : relaxation parameter
+    # maxIterations : maximum number of iterations
+    # tolerance : tolerance for the relative residual
+    # Outputs :
+    # xk : approximate solution
+    # RelativeError : vector of the relative norms of the residuals
+
+    # we do not keep track of the whole sequence, only the current iterate
+    xk = x0
+    rk = b - A.dot(xk)
+
+    RelativeError = []
+    for k in range(maxIterations) :
+
+        zk = np.linalg.solve(P,rk)
+        xk = xk + alpha*zk
+
+        rk = b - A.dot(xk)
+        # you can verify that this is equivalent to
+        # rk = rk - alpha*A.dot(zk)
+        RelativeError.append(np.linalg.norm(rk)/np.linalg.norm(b))
+        if ( RelativeError[-1] < tolerance ) :
+            print(f'Richardson converged in {k+1} iterations with a relative residual of {RelativeError[-1]:1.3e}')
+            return xk, RelativeError
+
+    print(f'Richardson did not converge in {maxIterations} iterations, the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
+    return xk, RelativeError
+
+
+
+def PrecGradient(A, b, x0, P, maxIterations, tolerance) :
+    # Preconditioned gradient method to approximate the solution of Ax=b
+    # x0 : initial guess
+    # P : preconditioner
+    # maxIterations : maximum number of iterations
+    # tolerance : tolerance for the relative residual
+    # Outputs :
+    # xk : approximate solution
+    # RelativeError : vector of the relative norms of the residuals
+
+    # we do not keep track of the whole sequence, only the current iterate
+    xk = x0
+    rk = b - A.dot(xk)
+
+    RelativeError = []
+    for k in range(maxIterations) :
+
+        zk = np.linalg.solve(P,rk)
+        Azk = A.dot(zk)
+
+        alphak = zk.dot(rk) / zk.dot(Azk)
+
+        xk = xk + alphak*zk
+
+        rk = b - A.dot(xk)
+        # you can verify that this is equivalent to
+        # rk = rk - alphak*A.dot(zk)
+        RelativeError.append(np.linalg.norm(rk)/np.linalg.norm(b))
+        if ( RelativeError[-1] < tolerance ) :
+            print(f'Gradient converged in {k+1} iterations with a relative residual of {RelativeError[-1]:1.3e}')
+            return xk, RelativeError
+
+    print(f'Gradient did not converge in {maxIterations} iterations, the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
+    return xk, RelativeError
+
+
+def PrecConjugateGradient(A, b, x0, P, maxIterations, tolerance) :
+    # Preconditioned conjugate gradient method to approximate the solution of Ax=b
+    # x0 : initial guess
+    # P : preconditioner
+    # maxIterations : maximum number of iterations
+    # tolerance : tolerance for the relative residual
+    # Outputs :
+    # xk : approximate solution
+    # RelativeError : vector of the relative norms of the residuals
+
+    # we do not keep track of the whole sequence, only the current iterate
+    xk = x0
+    rk = b - A.dot(xk)
+    zk = np.linalg.solve(P,rk)
+    pk = zk
+
+    RelativeError = []
+    for k in range(maxIterations) :
+
+        Apk = A.dot(pk)
+        alphak = pk.dot(rk) / pk.dot(Apk)
+
+        xk = xk + alphak*pk
+
+        rk = rk - alphak*Apk
+        # you can verify that this is equivalent to
+        # rk = b - A.dot(xk)
+
+        zk = np.linalg.solve(P,rk)
+
+        betak = Apk.dot(zk) / pk.dot(Apk)
+        pk = zk - betak*pk
+
+
+        RelativeError.append(np.linalg.norm(rk)/np.linalg.norm(b))
+        if ( RelativeError[-1] < tolerance ) :
+            print(f'Conjugate Gradient converged in {k+1} iterations with a relative residual of {RelativeError[-1]:1.3e}')
+            return xk, RelativeError
+
+    print(f'Conjugate Gradient did not converge in {maxIterations} iterations, the relative residual is {np.linalg.norm(rk)/np.linalg.norm(b):1.3e}')
+    return xk, RelativeError
+
+
+
+
+
+
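A minimal usage sketch of the two modules (not part of the patch; the integrand, the small SPD system, and the diagonal preconditioner below are illustrative assumptions, assuming both files sit next to the calling script):

    import numpy as np
    from IntegrationLib import Simpson
    from IterativeMethodsLib import PrecConjugateGradient

    # integrate sin(x) over [0, pi]; the exact value is 2
    approx = Simpson(0.0, np.pi, 10, np.sin)
    print(f'Simpson, N=10: {approx:.8f} (error {abs(approx - 2.0):.2e})')

    # solve a small symmetric positive definite system with the
    # preconditioned conjugate gradient and a Jacobi (diagonal) preconditioner
    A = np.array([[4.0, 1.0, 0.0],
                  [1.0, 3.0, 1.0],
                  [0.0, 1.0, 2.0]])
    b = np.array([1.0, 2.0, 3.0])
    x0 = np.zeros(3)
    P = np.diag(np.diag(A))
    xk, relres = PrecConjugateGradient(A, b, x0, P, 100, 1e-10)
    print('solution:', xk)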