# EXERCISE 2, PART 1.

import numpy as np


def conjugate_gradient(A, b, x0, tol, maxIter):
    """Minimize the N-dimensional quadratic function defined in exercise 2
    using the conjugate gradient (CG) method.

    Inputs:
        A:       symmetric positive-definite matrix of size N x N --> array
        b:       vector of size N --> array
        x0:      initial guess, vector of size N --> array
        tol:     convergence tolerance on the norm of the residual --> float
        maxIter: maximum number of iterations in case the method does not
                 converge --> int

    Returns the minimizer "x", all the iteration points "iterations_x" and
    "iterations_y", the number of iterations "j", and the norm of the final
    residual.
    """
    r = b - np.einsum('ij,j->i', A, x0)    # initial residual
    p = r                                  # initial search direction
    x = x0                                 # initial guess
    rdot0 = np.einsum('i,i->', r, r)       # initial squared norm of the residual

    # Save the initial guess in the lists iterations_x and iterations_y
    # (the first two components of x are tracked for plotting). These lists
    # grow with the number of iterations.
    iterations_x = [x0[0]]
    iterations_y = [x0[1]]

    j = 0  # iteration counter

    # CG loop
    while True:
        j += 1

        # Compute the new approximate solution
        Ap = np.einsum('ij,j->i', A, p)    # A * p
        pAp = np.einsum('i,i->', p, Ap)    # p^T * A * p
        alpha = rdot0 / pAp                # optimal step length along p
        x = x + alpha * p                  # new value of x

        # Save the result of the current iteration
        iterations_x.append(x[0])
        iterations_y.append(x[1])

        r = r - alpha * Ap                 # updated residual
        rdot1 = np.einsum('i,i->', r, r)   # updated squared norm of the residual

        # Stop when the residual is small enough or the iteration budget
        # is exhausted
        if np.sqrt(rdot1) < tol or j >= maxIter:
            break

        p = r + (rdot1 / rdot0) * p        # new conjugate search direction
        rdot0 = rdot1                      # carry the dot product to the next iteration

    return x, iterations_x, iterations_y, j, np.sqrt(rdot1)
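

# Minimal usage sketch. The 2 x 2 system below is an illustrative assumption,
# not the actual data of exercise 2: minimizing f(x) = 1/2 x^T A x - b^T x
# for a symmetric positive-definite A amounts to solving A x = b, which is
# what the routine above does.
if __name__ == "__main__":
    A = np.array([[4.0, 1.0],
                  [1.0, 3.0]])   # symmetric positive-definite (assumed example)
    b = np.array([1.0, 2.0])
    x0 = np.array([2.0, 1.0])    # arbitrary starting point

    x, it_x, it_y, n_iter, res_norm = conjugate_gradient(A, b, x0,
                                                         tol=1e-10, maxIter=100)
    print("minimizer:", x)       # should agree with np.linalg.solve(A, b)
    print("iterations:", n_iter, "| final residual norm:", res_norm)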