# Implementing the conjugate gradient descent for the example function
# start the conjugate gradient iteration with a random initial guess again
x = np.random.randn(2, 1)
# The residual
r = b.T - A @ x    # initial residual r = b - A x
v = r              # first search direction
c = r.T @ r        # squared norm of the residual
i = 0
while i <= IterMax:
    z = A @ v
    t = c / (v.T @ z)      # step length along the current direction
    x += t * v
    r -= t * z             # update the residual
    d = r.T @ r
    if np.sqrt(d) < tolerance:
        break
    v = r + (d / c) * v    # next direction, conjugate to the previous ones
    c = d
    i += 1
print("Absolute error for the Conjugate gradient solution",np.abs(x-exact))
The result is much more accurate than the ones obtained with steepest descent or plain gradient descent. This is expected: in exact arithmetic the conjugate gradient method solves an n-dimensional symmetric positive definite system in at most n iterations, so for our two-dimensional example it converges in at most two steps.
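
For completeness, here is a minimal sketch of the surrounding setup that the snippet above assumes. The names A, b, exact, IterMax, and tolerance are defined in the earlier sections; the concrete values below are only illustrative, chosen so that the snippet runs on its own:

# Illustrative setup (an assumption; the earlier sections define these values)
import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])       # a small symmetric positive definite matrix
b = np.array([[1.0, 2.0]])       # right-hand side as a row vector, hence the b.T above
exact = np.linalg.solve(A, b.T)  # reference solution for the error check
IterMax = 100                    # iteration cap
tolerance = 1e-10                # stopping threshold on the residual norm

With a setup like this, the loop above terminates after at most two iterations in exact arithmetic, since the search directions are A-conjugate and there are only two independent directions in the plane.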