Hi guys I have a problem with defining A in my Matrix where i def cg(self, b, epsilon=1e-4): and i was wondering if you could help me .

```
import math
import time
import matplotlib.pyplot as plt
import numpy as np
class Vector(object):
def __init__(self, data):
self.data = data
self.rows = len(data)
def __mul__(self, other):
assert self.rows == other.rows
return sum([vi * vj for vi, vj in zip(self.data, other.data)])
def __rmul__(self, a):
return Vector([a * d for d in self.data])
def __add__(self, other):
assert self.rows == other.rows
return Vector([i + j for (i, j) in zip(self.data, other.data)])
def __sub__(self, other):
assert self.rows == other.rows
return Vector([i - j for (i, j) in zip(self.data, other.data)])
def norm(self):
return math.sqrt(self * self)
def __str__(self):
return '{0}'.format(self.data)
class Matrix(object):
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
def cg(self, b, epsilon=1e-4):
... # [1]
xk = Vector([0.] * b.rows)
rk = b - A * xk
pk = 1. * rk
for k in range(b.rows):
alpha = (rk * rk) / (pk * (A * pk))
xkk = xk + alpha * pk
rkk = rk - alpha * (A * pk)
if rkk.norm() < epsilon:
break
beta = (rkk * rkk) / (rk * rk)
pkk = rkk + beta * pk
rk = rkk
pk = pkk
xk = xkk
return xkk
#exit()
class FullMatrix(Matrix):
def __init__(self, rows, cols, data):
super().__init__(rows, cols)
self.data = data
def __mul__(self, v):
assert self.cols == v.rows
data = [0.] * self.rows
... # [2]
for i in range(self.rows):
for j in range(self.cols):
data[i] += self.data[i+j*self.rows]*v.data[j]
return Vector(data)
#exit ()
class SparseMatrix(Matrix):
def __init__(self, rows, cols, triplets):
super().__init__(rows, cols)
self.triplets = triplets
def __mul__(self, v):
assert self.cols == v.rows
data = [0.] * self.rows
... # [3]
for (i,j,d) in self.triplets:
data[i] += d*v.data[j]
return Vector(data)
#exit()
if __name__ == "__main__":
sizes = [2 ** i for i in (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)]
t_full = []
t_sparse = []
for N in sizes:
print('_' * 80)
print("size : {0}".format(N))
# b - vector
data = [0. for i in range(N)]
data[-1] = 10
b = Vector(data)
# Af - full matrix
data = [0.] * (N * N)
for i in range(N - 1):
data[i + N * i ] = 10.
data[i + 1 + N * i ] = -0.1
data[i + N * (i + 1)] = -0.1
data[N - 1 + N * (N - 1)] = 10.
Af = FullMatrix(N, N, data)
# solve
t0 = time.time()
#~ xf = cg(Af, b)
xf = Af.cg(b)
tf = time.time() - t0
t_full.append(tf)
print("full : time: {0:6.2f}s (x[-1] = {1:12.10g})".format(tf, xf.data[-1]))
# As - sparse
triplets = []
for i in range(N - 1):
triplets.append((i, i, 10.))
triplets.append((i + 1, i, -0.1))
triplets.append((i, i + 1, -0.1))
triplets.append((N - 1, N - 1, 10.))
As = SparseMatrix(N, N, triplets)
# solve
t0 = time.time()
#~ xs = cg(As, b)
xs = As.cg(b)
ts = time.time() - t0
t_sparse.append(ts)
print("sparse : time: {0:6.2f}s (x[-1] = {1:12.10g})".format(ts, xs.data[-1]))
#exit()
# An - numpy
An = np.zeros((N, N))
for i in range(N - 1):
An[i, i] = 10
An[i + 1, i] = -0.1
An[i, i + 1] = -0.1
# solve
An[N - 1, N - 1] = 10.
t0 = time.time()
xn = np.linalg.solve(An, b.data)
tn = time.time() - t0
print("numpy : time: {0:6.2f}s (x[-1] = {1:12.10g})".format(tn, xn[-1]))
print()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(sizes, t_full, 'b-o', label='full')
ax.plot(sizes, t_sparse, 'r-o', label='sparse')
ax.set_xlabel('size')
ax.set_ylabel('times [s]')
ax.grid()
ax.legend()
plt.show()
```