From 14c5953b379eab25df74038046af9251aa74c344 Mon Sep 17 00:00:00 2001
From: manu12121999
Date: Fri, 20 Dec 2024 21:29:31 +0100
Subject: [PATCH] Fixed Conv layer, added tests for layers

---
 .github/workflows/python-package.yml |   2 +-
 ctrl_c_nn.py                         | 142 +++++++++++++------
 test/test_nn.py                      | 199 +++++++++++++++++++++++++--
 test/test_tensor.py                  |   8 +-
 4 files changed, 295 insertions(+), 56 deletions(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 1bb5439..9f4f957 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -23,7 +23,7 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Install numpy
       run: |
-        pip install numpy coverage flake8
+        pip install numpy coverage flake8 pytorch-cpu
     - name: Test unittests
       run: |
         python -m coverage run -m unittest discover -s ./test -p 'test_*.py'
diff --git a/ctrl_c_nn.py b/ctrl_c_nn.py
index f41568f..bfedee8 100644
--- a/ctrl_c_nn.py
+++ b/ctrl_c_nn.py
@@ -345,38 +345,41 @@ def __eq__(self, other):
     def size(self, dim):
         return self.shape[dim]
 
+
     ######################
     # Construction Methods
     #####################
     @staticmethod
-    def zeros(shape):
-        if isinstance(shape, int):
-            shape = (shape, )
-        return Tensor(LLOps.fill(shape, 0))
+    def zeros(*shape, dtype=float):
+        if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
+            shape = shape[0]
+        value = 0.0 if dtype == float else 0 if dtype == int else None
+        return Tensor.fill(shape, value=value)
 
     @staticmethod
-    def ones(shape):
-        if isinstance(shape, int):
-            shape = (shape, )
-        return Tensor(LLOps.fill(shape, 1))
+    def ones(*shape, dtype=float):
+        if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
+            shape = shape[0]
+        value = 1.0 if dtype == float else 1 if dtype == int else None
+        return Tensor.fill(shape, value=value)
 
     @staticmethod
-    def fill(shape, number):
-        if isinstance(shape, int):
-            shape = (shape, )
-        return Tensor(LLOps.fill(shape, number))
+    def fill(*shape, value):
+        if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
+            shape = shape[0]
+        return Tensor(LLOps.fill(shape, value))
 
     @staticmethod
-    def random_float(shape, min=-1.0, max=1.0):
-        if isinstance(shape, int):
-            shape = (shape, )
+    def random_float(*shape, min=-1.0, max=1.0):
+        if isinstance(shape[0], (list, tuple)):
+            shape = shape[0]
         return Tensor(LLOps.fill_callable(shape, lambda: random.uniform(min, max)))
 
     @staticmethod
-    def random_int(shape, min=0, max=10):
-        if isinstance(shape, int):
-            shape = (shape,)
+    def random_int(*shape, min=0, max=10):
+        if isinstance(shape[0], (list, tuple)):
+            shape = shape[0]
         return Tensor(LLOps.fill_callable(shape, lambda: random.randint(min, max)))
 
     @staticmethod
@@ -649,6 +652,12 @@ def __init__(self, *args, **kwargs):
     def __call__(self, x: Tensor):
         return self.forward(x)
 
+    def eval(self):
+        pass
+
+    def train(self):
+        pass
+
     def forward(self, x: Tensor):
         raise NotImplementedError
 
@@ -659,6 +668,9 @@ def load_state_dict(self, state_dict):
         weight_apply(self, state_dict)
 
 class ReLU(Module):
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+
     def forward(self, x: Tensor):
         self.cache = x
         return x.apply(lambda v: max(0.0, v))
@@ -670,24 +682,30 @@ def backward(self, dout: Tensor):
         return dx
 
 class LeakyReLU(Module):
+    def __init__(self, negative_slope=0.01, *args, **kwargs):
+        super().__init__()
+        self.negative_slope = negative_slope
+
     def forward(self, x: Tensor):
         self.cache = x
-        return x.apply(lambda v: 0.1*v if v < 0 else v)
+        return x.apply(lambda v: self.negative_slope*v if v < 0 else v)
 
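+    # The gradient of LeakyReLU is 1 for v >= 0 and negative_slope otherwise,
+    # so backward() only uses the cached input to build that mask.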
     def backward(self, dout: Tensor):
         x = self.cache
-        mask = x.apply(lambda v: 1 if v >= 0 else 0.1)
+        mask = x.apply(lambda v: 1 if v >= 0 else self.negative_slope)
         dx = dout * mask
         return dx
 
 class Linear(Module):
-    def __init__(self, in_features, out_features):
+    def __init__(self, in_features, out_features, bias=True):
         super().__init__()
         init_value_min = -0.1
         init_value_max = 0.1
-        self.weight = Tensor.random_float(shape=(out_features, in_features), min=init_value_min, max=init_value_max)
-        self.bias = Tensor.random_float(shape=(out_features, ), min=init_value_min, max=init_value_max)
-
+        self.weight = Tensor.random_float((out_features, in_features), min=init_value_min, max=init_value_max)
+        if bias:
+            self.bias = Tensor.random_float((out_features, ), min=init_value_min, max=init_value_max)
+        else:
+            self.bias = Tensor.zeros(out_features)
         self.dw = None
         self.db = None
 
@@ -769,19 +787,25 @@ def backward(self, dout: Tensor):
         return dout
 
 class Conv2d(Module):
-    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride=1, padding=0, bias=True, *args, **kwargs):
+    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride=1, padding=0, groups=1, bias=True, *args, **kwargs):
         if args != () or kwargs != {}:
             print('Warning, Conv2dTranspose ignoring', args, kwargs)
         super().__init__()
+        assert out_channels % groups == 0
+        assert in_channels % groups == 0
         self.stride = stride
         self.padding = padding
         self.kernel_size = kernel_size
         self.out_channels = out_channels
-        self.weight = Tensor.fill(shape=(out_channels, in_channels, kernel_size, kernel_size), number=0.0)
-        self.bias = Tensor.fill(shape=(out_channels, ), number=0.0 if bias else 0.0)
+        self.groups = groups
+        self.weight = Tensor.fill((out_channels, in_channels//groups, kernel_size, kernel_size), value=0.0)
+        self.bias = Tensor.fill((out_channels, ), value=0.0 if bias else 0.0)
 
     def forward(self, x: Tensor):
-        return self.forward_gemm(x)
+        if self.groups == 1:
+            return self.forward_gemm(x)
+        else:
+            return self.forward_gemm_grouped(x)
 
     def forward_naive(self, x: Tensor):
         # shapes  x: (B, C_in, H, W)   w: (C_out, C_in, K, K)  b: (C_Out)  out: (B, C_out, ~H/s, ~W/s)
 
@@ -811,7 +835,7 @@ def forward_gemm(self, x: Tensor):
         H_out = (H - K + 2 * P) // S + 1
         W_out = (W - K + 2 * P) // S + 1
 
-        x_padded = Tensor.fill((B, C_in, H + 2 * P, W + 2 * P), 0.0)
+        x_padded = Tensor.zeros(B, C_in, H + 2 * P, W + 2 * P)
         x_padded[:, :, P:H+P, P:W+P] = x
         assert x_padded[:, :, P:H + P, P:W + P] == x
 
@@ -829,13 +853,53 @@ def im2col(x_pad):
         start_mat = time.time()
         res = reshaped_kernel.matmul_T_2d(Tensor(col_repres))  # for performance reasons, Equal to reshaped_kernel @ Tensor(col_repres).T
         end_mat = time.time()
-        res = res.reshape((B, C_out, H_out, W_out))
+        if B > 1:
+            res = res.reshape((C_out, B, H_out, W_out)).permute((1, 0, 2, 3))
+        else:
+            res = res.reshape((1, C_out, H_out, W_out))
         res = res + self.bias.reshape((1, C_out, 1, 1))
 
         assert res.shape == (B, C_out, H_out, W_out)
         print("Conv2d took in total", time.time() - start_time, " of which Matmul took", end_mat - start_mat)
         return res
 
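+    # Grouped variant of forward_gemm: the kernel is reshaped to
+    # (G, C_out//G, C_in//G*K*K) and the input is split into G channel groups
+    # before building the im2col matrix.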
+    def forward_gemm_grouped(self, x: Tensor):
+        start_time = time.time()
+        B, C_in, H, W = x.shape
+        C_out = self.out_channels
+        K, P, S, G = self.kernel_size, self.padding, self.stride, self.groups
+        H_out = (H - K + 2 * P) // S + 1
+        W_out = (W - K + 2 * P) // S + 1
+
+        x_padded = Tensor.fill((B, G, C_in // G, H + 2 * P, W + 2 * P), value=0.0)
+        x_padded[:, :, :, P:H + P, P:W + P] = x.reshape((B, G, C_in // G, H, W))
+        assert x_padded[:, :, :, P:H + P, P:W + P] == x.reshape(
+            (B, G, C_in // G, H, W)), f'A {x_padded[:, :, :, P:H + P, P:W + P]}, B {x.reshape((B, G, C_in // G, H, W))}'
+
+        reshaped_kernel = self.weight.reshape((G, C_out // G, C_in // G * K * K))
+
+        def im2col(x_pad):
+            columns = [x_pad[b, g, :, h:h + K, w:w + K].flatten().tolist()
+                       for b in range(B)
+                       for g in range(G)
+                       for h in range(0, H + 2 * P - K + 1, S)
+                       for w in range(0, W + 2 * P - K + 1, S)
+                       ]
+            return columns
+
+        col_repres = im2col(x_padded)
+        start_mat = time.time()
+        # print(reshaped_kernel.shape, Tensor(col_repres).shape)
+        res = reshaped_kernel @ Tensor(col_repres).T
+        end_mat = time.time()
+        res = res.reshape((B, C_out, H_out, W_out))
+        res = res + self.bias.reshape((1, C_out, 1, 1))
+
+        assert res.shape == (B, C_out, H_out, W_out), f"res shape {res.shape} is not {(B, C_out, H_out, W_out)}"
+        print("Conv2d took in total", time.time() - start_time, " of which Matmul took", end_mat - start_mat)
+        return res
+
 class Conv2dTranspose(Module):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride=1, padding=0, bias=True, *args, **kwargs):
         if args != () or kwargs != {}:
@@ -845,8 +909,8 @@ def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride
         self.padding = padding
         self.kernel_size = kernel_size
         self.out_channels = out_channels
-        self.weight = Tensor.fill(shape=(out_channels, in_channels, kernel_size, kernel_size), number=0.0)
-        self.bias = Tensor.fill(shape=(out_channels, ), number=0.0 if bias else 0)
+        self.weight = Tensor.fill((out_channels, in_channels, kernel_size, kernel_size), value=0.0)
+        self.bias = Tensor.fill((out_channels, ), value=0.0 if bias else 0)
 
     def forward(self, x: Tensor):
         raise NotImplementedError
 
@@ -855,10 +919,10 @@ class BatchNorm2d(Module):
     def __init__(self, num_features, eps=1e-05, *args, **kwargs):
         if args != () or kwargs != {}:
             print('Warning, BatchNorm2d ignoring', args, kwargs)
-        self.weight = Tensor.fill((num_features,), 0.0)
-        self.bias = Tensor.fill((num_features,), 0.0)
-        self.running_mean = Tensor.fill((num_features,), 0.0)
-        self.running_var = Tensor.fill((num_features,), 1.0)
+        self.weight = Tensor.fill((num_features,), value=0.0)
+        self.bias = Tensor.fill((num_features,), value=0.0)
+        self.running_mean = Tensor.fill((num_features,), value=0.0)
+        self.running_var = Tensor.fill((num_features,), value=1.0)
         self.num_batches_tracked = Tensor([0.0])
         self.C = num_features
         self.eps = eps
@@ -892,7 +956,7 @@ def forward(self, x: Tensor):
         K, P, S = self.kernel_size, self.padding, self.stride
         H_out = (H - K + 2 * P) // S + 1
         W_out = (W - K + 2 * P) // S + 1
-        x_padded = Tensor.zeros((B, C_in, H + 2 * P, W + 2 * P))
+        # pad with -inf so padded positions can never win the max
+        x_padded = Tensor.fill((B, C_in, H + 2 * P, W + 2 * P), value=-math.inf)
         x_padded[:, :, P:H+P, P:W+P] = x
         assert x_padded[:, :, P:H+P, P:W+P].tolist() == x.tolist()
         output_tensor = Tensor.zeros((B, C_in, H_out, W_out))
@@ -996,7 +1060,7 @@ def get_new_size_and_scale_factors(size, scale_factor):
         return new_H, new_W, scale_factor_h, scale_factor_w
 
     new_H, new_W, scale_factor_h, scale_factor_w = get_new_size_and_scale_factors(size, scale_factor)
-    output_tensor = Tensor.fill((B, C, new_H, new_W), 0.0)
+    output_tensor = Tensor.fill((B, C, new_H, new_W), value=0.0)
     if mode == 'nearest':
         for new_h in range(new_H):
             for new_w in range(new_W):
@@ -1247,7 +1311,7 @@ def save_png(path, tensor):
     @staticmethod
     def resize(tensor, new_size: tuple):
         H, W, C = tensor.shape
-        new_tensor = tensor.zeros(new_size)
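+        # keep an integer dtype for the resized image, matching the integer pixel data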
tensor.zeros(new_size) + new_tensor = tensor.zeros(new_size, dtype=int) for new_i, i in zip(range(new_size[0]), range(0, H, H // new_size[0])): for new_j, j in zip(range(new_size[1]), range(0, W, W // new_size[1])): new_tensor[new_i, new_j] = tensor[i, j] diff --git a/test/test_nn.py b/test/test_nn.py index 69ec74f..19507be 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -1,32 +1,207 @@ import unittest -from ctrl_c_nn import Tensor, nn +import numpy as np +import torch +import ctrl_c_nn class TestNNForward(unittest.TestCase): def test_shape_forward_linear(self): - input = Tensor.random_float((8, 128)) - linear_layer = nn.Linear(128, 256) + input = ctrl_c_nn.Tensor.random_float((8, 128)) + linear_layer = ctrl_c_nn.nn.Linear(128, 256) pred = linear_layer(input) self.assertEqual(pred.shape, (8, 256), f"Shape is wrong after linear layer") def test_shape_forward_relu(self): - input = Tensor.random_float((8, 128)) - relu = nn.ReLU() + input = ctrl_c_nn.Tensor.random_float((8, 128)) + relu = ctrl_c_nn.nn.ReLU() pred = relu(input) self.assertEqual(pred.shape, (8, 128), f"Shape is wrong after relu layer.") def test_shape_forward_lin_lin(self): - input = Tensor.random_float((8, 64)) - linear_layer1 = nn.Linear(64, 96) - linear_layer2 = nn.Linear(96, 128) + input = ctrl_c_nn.Tensor.random_float((8, 64)) + linear_layer1 = ctrl_c_nn.nn.Linear(64, 96) + linear_layer2 = ctrl_c_nn.nn.Linear(96, 128) pred = linear_layer2(linear_layer1(input)) self.assertEqual(pred.shape, (8, 128), f"Shape is wrong after two linear layer.") def test_shape_forward_seq(self): - input = Tensor.random_float((8, 64)) - model = nn.Sequential( - nn.Linear(64, 96), - nn.Linear(96, 128) + input = ctrl_c_nn.Tensor.random_float((8, 64)) + model = ctrl_c_nn.nn.Sequential( + ctrl_c_nn.nn.Linear(64, 96), + ctrl_c_nn.nn.Linear(96, 128) ) pred = model(input) self.assertEqual(pred.shape, (8, 128), f"Shape is wrong after sequential layer.") + +class TestNNLayers(unittest.TestCase): + + def test_conv_layer_single_batch(self): + input_tensor = np.random.randn(1, 6, 16, 16) + weights = np.random.randn(9, 6, 3, 3) + bias = np.random.randn(9) + + # Pytorch + m_pytorch = torch.nn.Conv2d(in_channels=6, out_channels=9, kernel_size=3, stride=1, padding=1) + m_pytorch.weight = torch.nn.Parameter(torch.tensor(weights)) + m_pytorch.bias = torch.nn.Parameter(torch.tensor(bias)) + out_pytorch = m_pytorch(torch.tensor(input_tensor)) + + # Ctrl_C + m_ctrl_c = ctrl_c_nn.nn.Conv2d(in_channels=6, out_channels=9, kernel_size=3, stride=1, padding=1) + m_ctrl_c.weight.replace(ctrl_c_nn.Tensor(weights)) + m_ctrl_c.bias.replace(ctrl_c_nn.Tensor(bias)) + out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor)) + + self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch") + mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean() + self.assertLess(mean_diff.item(), 1e-5) + max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max() + self.assertLess(max_diff.item(), 1e-2) + + + def test_conv_layer(self): + input_tensor = np.random.randn(3, 6, 48, 16) + weights = np.random.randn(54, 6, 3, 3) + bias = np.random.randn(54) + + # Pytorch + m_pytorch = torch.nn.Conv2d(in_channels=6, out_channels=54, kernel_size=3, stride=1, padding=1) + m_pytorch.weight = torch.nn.Parameter(torch.tensor(weights)) + m_pytorch.bias = torch.nn.Parameter(torch.tensor(bias)) + out_pytorch = m_pytorch(torch.tensor(input_tensor)) + + # Ctrl_C + m_ctrl_c = ctrl_c_nn.nn.Conv2d(in_channels=6, out_channels=54, kernel_size=3, stride=1, padding=1) + 
+        m_ctrl_c.weight.replace(ctrl_c_nn.Tensor(weights))
+        m_ctrl_c.bias.replace(ctrl_c_nn.Tensor(bias))
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
+
+
+    def test_linear_layer(self):
+        input_tensor = np.random.randn(3, 543)
+        weights = np.random.randn(54, 543)
+        bias = np.random.randn(54)
+
+        # Pytorch
+        m_pytorch = torch.nn.Linear(in_features=543, out_features=54, bias=True)
+        m_pytorch.weight = torch.nn.Parameter(torch.tensor(weights))
+        m_pytorch.bias = torch.nn.Parameter(torch.tensor(bias))
+        out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+        # Ctrl_C
+        m_ctrl_c = ctrl_c_nn.nn.Linear(in_features=543, out_features=54, bias=True)
+        m_ctrl_c.weight.replace(ctrl_c_nn.Tensor(weights))
+        m_ctrl_c.bias.replace(ctrl_c_nn.Tensor(bias))
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
+
+
+    def test_MaxPool_layer(self):
+        for kernel_size in range(1, 3):
+            for stride in range(1, 3):
+                for padding in range(0, kernel_size // 2 + 1):
+                    input_tensor = np.random.randn(3, 6, 42, 42)
+
+                    # Pytorch
+                    m_pytorch = torch.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
+                    out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+                    # Ctrl_C
+                    m_ctrl_c = ctrl_c_nn.nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
+                    out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+                    self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+                    mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+                    self.assertLess(mean_diff.item(), 1e-5)
+                    max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+                    self.assertLess(max_diff.item(), 1e-2)
+
+    def test_ReLU_layer(self):
+        input_tensor = np.random.randn(3, 6, 42, 42)
+        # Pytorch
+        m_pytorch = torch.nn.ReLU()
+        out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+        # Ctrl_C
+        m_ctrl_c = ctrl_c_nn.nn.ReLU()
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
+
+    def test_LeakyReLU_layer(self):
+        input_tensor = np.random.randn(3, 6, 42, 42)
+        # Pytorch
+        m_pytorch = torch.nn.LeakyReLU(negative_slope=0.2)
+        out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+        # Ctrl_C
+        m_ctrl_c = ctrl_c_nn.nn.LeakyReLU(negative_slope=0.2)
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
+
+    def test_Dropout_layer(self):
+        input_tensor = np.random.randn(3, 6, 42, 42)
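+        # torch Dropout is put into eval mode (identity), so the ctrl_c_nn output is expected to match it exactly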
+        # Pytorch
+        m_pytorch = torch.nn.Dropout(p=0.2)
+        m_pytorch.eval()
+        out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+        # Ctrl_C
+        m_ctrl_c = ctrl_c_nn.nn.Dropout(p=0.2)
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
+
+    def test_BN_layer(self):
+        input_tensor = np.random.randn(1, 17, 48, 16)
+        weights = np.random.randn(17)
+        bias = np.random.randn(17)
+        running_mean = np.random.randn(17)
+        running_var = np.random.randn(17) + 2.0
+
+        # Pytorch
+        with torch.no_grad():
+            m_pytorch = torch.nn.BatchNorm2d(num_features=17, eps=1e-04).eval()
+            m_pytorch.weight = torch.nn.Parameter(torch.tensor(weights))
+            m_pytorch.bias = torch.nn.Parameter(torch.tensor(bias))
+            m_pytorch.running_mean = torch.nn.Parameter(torch.tensor(running_mean))
+            m_pytorch.running_var = torch.nn.Parameter(torch.tensor(running_var))
+            out_pytorch = m_pytorch(torch.tensor(input_tensor))
+
+        # Ctrl_C
+        m_ctrl_c = ctrl_c_nn.nn.BatchNorm2d(num_features=17, eps=1e-04)
+        m_ctrl_c.weight.replace(ctrl_c_nn.Tensor(weights))
+        m_ctrl_c.bias.replace(ctrl_c_nn.Tensor(bias))
+        m_ctrl_c.running_mean.replace(ctrl_c_nn.Tensor(running_mean))
+        m_ctrl_c.running_var.replace(ctrl_c_nn.Tensor(running_var))
+        out_ctrl_c = m_ctrl_c(ctrl_c_nn.Tensor(input_tensor))
+
+        self.assertEqual(out_pytorch.shape, out_ctrl_c.shape, "shape mismatch")
+        mean_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().mean()
+        self.assertLess(mean_diff.item(), 1e-5)
+        max_diff = (out_pytorch - torch.tensor(out_ctrl_c.tolist())).abs().max()
+        self.assertLess(max_diff.item(), 1e-2)
\ No newline at end of file
diff --git a/test/test_tensor.py b/test/test_tensor.py
index 383b5cc..ffc9ce4 100644
--- a/test/test_tensor.py
+++ b/test/test_tensor.py
@@ -163,8 +163,8 @@ def test_mul_dimless_tensor(self):
 
     def test_bin_ops_basic(self):
         shape = (3, 4, 1, 1)
-        a = Tensor.fill(shape, 6)
-        b = Tensor.fill(shape, 2)
+        a = Tensor.fill(shape, value=6)
+        b = Tensor.fill(shape, value=2)
         self.assertEqual((a + b)[0, 2, 0, 0].item(), 8, "add not working")
         self.assertEqual((a - b)[0, 2, 0, 0].item(), 4, "sub not working")
         self.assertEqual((a * b)[0, 2, 0, 0].item(), 12, "mul not working")
@@ -455,8 +455,8 @@ def test_create(self):
         t4 = Tensor.random_int(shape)
         t5 = Tensor.random_float(shape, min=-2, max=+2)
         t6 = Tensor.random_int(shape, min=-2, max=+2)
-        t7 = Tensor.fill(shape, 27)
-        t8 = Tensor.fill(shape, 27)
+        t7 = Tensor.fill(shape, value=27)
+        t8 = Tensor.fill(shape, value=27)
         self.assertEqual(shape, t1.shape, f"Tensor create gives wrong shape.")
         self.assertEqual(shape, t2.shape, f"Tensor create gives wrong shape.")
         self.assertEqual(shape, t3.shape, f"Tensor create gives wrong shape.")