From 68c5230cae948da0acb84bc7f9748bb89a3da484 Mon Sep 17 00:00:00 2001
From: Dmitrii Makarenko
Date: Wed, 29 Nov 2023 12:54:36 -0700
Subject: [PATCH] Add a DataLoader-driven MLP inference benchmark to the e2e
 test suite

Reformat the MLP test modules (black style), replace the single-tensor
MLP smoke test with a benchmark that feeds batches from a random
classification dataset through both the eager PyTorch model and the
compiled module, and tag the refbackend ExecutionEngine.invoke() timer
as **Inference** so the two timings line up in the logs.
---
 .../linalg_on_tensors_backends/refbackend.py  |   2 +-
 .../torch_mlir_e2e_test/test_suite/mlp.py     | 128 ++++++++++++++----
 2 files changed, 102 insertions(+), 28 deletions(-)

diff --git a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py b/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
index d4168e3586400..47c8ad1a39a22 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
@@ -115,7 +115,7 @@ def invoke(*args):
                 ffi_args.append(
                     ctypes.pointer(
                         ctypes.pointer(get_unranked_memref_descriptor(arg))))
-            with DebugTimer('ExecutionEngine.invoke()', logger=self.logger):
+            with DebugTimer('\n**Inference** ExecutionEngine.invoke()', logger=self.logger):
                 self.ee.invoke(function_name, *ffi_args)
             result = self.result
             assert result is not None, "Invocation didn't produce a result"
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py
index 32b4e39e1355d..1bfae81a3906c 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py
@@ -14,6 +14,7 @@
 
 # Multi-layer perceptron (MLP) models.
 
+
 class Mlp1LayerModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -21,18 +22,23 @@ def __init__(self):
         torch.manual_seed(0)
         self.fc0 = nn.Linear(3, 5)
         self.tanh0 = nn.Tanh()
+
     @export
-    @annotate_args([
-        None,
-        ([-1, -1], torch.float32, True),
-    ])
+    @annotate_args(
+        [
+            None,
+            ([-1, -1], torch.float32, True),
+        ]
+    )
     def forward(self, x):
         return self.tanh0(self.fc0(x))
 
+
 @register_test_case(module_factory=lambda: Mlp1LayerModule())
 def Mlp1LayerModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(5, 3))
 
+
 class Mlp2LayerModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -43,20 +49,25 @@ def __init__(self):
         self.tanh0 = nn.Tanh()
         self.fc1 = nn.Linear(N_HIDDEN, 2)
         self.tanh1 = nn.Tanh()
+
     @export
-    @annotate_args([
-        None,
-        ([-1, -1], torch.float32, True),
-    ])
+    @annotate_args(
+        [
+            None,
+            ([-1, -1], torch.float32, True),
+        ]
+    )
     def forward(self, x):
         x = self.tanh0(self.fc0(x))
         x = self.tanh1(self.fc1(x))
         return x
 
+
 @register_test_case(module_factory=lambda: Mlp2LayerModule())
 def Mlp2LayerModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(5, 3))
 
+
 class Mlp2LayerModuleNoBias(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -67,20 +78,25 @@ def __init__(self):
         self.tanh0 = nn.Tanh()
         self.fc1 = nn.Linear(N_HIDDEN, 2, bias=False)
         self.tanh1 = nn.Tanh()
+
     @export
-    @annotate_args([
-        None,
-        ([-1, -1], torch.float32, True),
-    ])
+    @annotate_args(
+        [
+            None,
+            ([-1, -1], torch.float32, True),
+        ]
+    )
     def forward(self, x):
         x = self.tanh0(self.fc0(x))
         x = self.tanh1(self.fc1(x))
         return x
 
+
 @register_test_case(module_factory=lambda: Mlp2LayerModuleNoBias())
 def Mlp2LayerModuleNoBias_basic(module, tu: TestUtils):
     module.forward(tu.rand(5, 3))
 
+
 class BatchMlpLayerModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -88,25 +104,31 @@ def __init__(self):
         torch.manual_seed(0)
         self.fc0 = nn.Linear(3, 5)
         self.tanh0 = nn.Tanh()
+
     @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-    ])
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+        ]
+    )
     def forward(self, x):
         return self.tanh0(self.fc0(x))
 
+
 @register_test_case(module_factory=lambda: BatchMlpLayerModule())
 def BatchMlpLayerModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(7, 5, 3))
 
 
 from torch_mlir_e2e_test.framework import TraceItem
+
+
 class MLP(torch.nn.Module):
     def __init__(self, input_dim, output_dim):
         super().__init__()
         self.flatten = torch.nn.Flatten()
-        self.linear1 = torch.nn.Linear(input_dim, input_dim // 2)
+        self.linear1 = torch.nn.Linear(input_dim, input_dim // 2, bias=False)
         # self.relu = torch.nn.ReLU()
         # self.linear2 = torch.nn.Linear(input_dim // 2, output_dim)
@@ -122,31 +144,83 @@ def forward(self, x):
         # x = self.linear2(x)
         return x
 
-model = MLP(128*128, 0)
+
+model = MLP(128 * 128, 0)
+
+# Seed the weights deterministically so the eager and compiled runs
+# compute identical outputs.
+with torch.no_grad():
+    model.linear1.weight = nn.Parameter(torch.ones(model.linear1.weight.shape))
+    model.linear1.weight[:, 1] = 2.0
+    model.linear1.weight[:, 4] = 5.0
+    model.linear1.weight[:, 6] = 5.0
+    # linear1 is built with bias=False; to seed a bias, re-enable it and assign:
+    # model.linear1.bias = nn.Parameter(torch.ones_like(model.linear1.bias))
+
 
 def model_factory():
     return model
 
 
 # model = model_factory()
-test_input = torch.rand(1, 128, 128)
-# from torch_mlir_e2e_test.framework import DebugTimer
-# with DebugTimer("Vanilla", logger=print):
-# out_vanilla = model.forward(test_input)
+in_shape = (128, 128)
+
+from torch.utils.data import DataLoader, Dataset
+import numpy as np
+
+
+class RandomClsDataset(Dataset):
+    # Synthetic classification data: random float32 inputs, random integer labels.
+    def __init__(self, n, in_shape, n_classes):
+        super().__init__()
+        self.values = np.random.randn(n, *in_shape).astype(np.float32)
+        self.labels = np.random.randint(n_classes, size=(n,))
+
+    def __len__(self):
+        return len(self.values)
+
+    def __getitem__(self, index):
+        return self.values[index], self.labels[index]
+
+
+ds_size = 1000
+ds = RandomClsDataset(ds_size, in_shape, 100)
+train_loader = DataLoader(
+    ds, batch_size=100, shuffle=True, num_workers=1, pin_memory=False
+)
+
+from torch_mlir_e2e_test.framework import DebugTimer
+
+print("Dataset size: ", len(train_loader.dataset))
+# Each next(iter(...)) builds a fresh iterator, so with shuffle=True these
+# are three independent random batches.
+sample_input = next(iter(train_loader))[0]
+sample_input2 = next(iter(train_loader))[0]
+sample_input3 = next(iter(train_loader))[0]
+print("[in] sample")
+with DebugTimer("\nVanilla sample", logger=print):
+    model.forward(sample_input)
+    model.forward(sample_input2)
+    model.forward(sample_input3)
+for _ in range(ds_size // 100):
+    with DebugTimer("\n**Inference** Vanilla", logger=print):
+        out_vanilla = model.forward(sample_input2)
+
+
 # golden_trace = [TraceItem(symbol="mlp", inputs=[test_input], output=out_vanilla)]
 w = model.linear1.weight.detach().numpy()
-b = model.linear1.bias.detach().numpy()
-print("in shape: ", test_input.shape)
+# linear1 has bias=False, so there is no bias tensor to inspect:
+# b = model.linear1.bias.detach().numpy()
+print("in shape: ", sample_input.shape)
 print(" w shape: ", w.shape)
-print(" b shape: ", b.shape)
+# print(" b shape: ", b.shape)
 
 
 @register_test_case(module_factory=model_factory)
 def MLP_basic(module, tu: TestUtils):
     # test_input = torch.rand(2, 4, 4)
+    print("[in] sample")
+    module.forward(sample_input)
+    module.forward(sample_input2)
+    module.forward(sample_input3)
+    print("[in] done sample")
     print("[in test] basic")
     # print("[in test] inp: ", test_input)
-    out = module.forward(test_input)
+    for _ in range(ds_size // 100):
+        out = module.forward(sample_input2)
     # print("[in test] out: ", out)
     print("[in test] out shape: ", out.size())
-
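
Note: the benchmark relies on DebugTimer from torch_mlir_e2e_test.framework
only as a context manager that reports elapsed time through a logger
callable, exactly as used in the hunks above. For reference, a minimal
standalone sketch of the same timing pattern; the helper name
time_inference, the iteration count, and the batch shape are illustrative
and not part of this patch:

    import torch
    from torch_mlir_e2e_test.framework import DebugTimer

    def time_inference(model, batch, iters=10):
        # Each iteration is timed separately, mirroring the
        # "**Inference**" labels printed by the benchmark above.
        out = None
        for _ in range(iters):
            with DebugTimer("\n**Inference** Vanilla", logger=print):
                out = model.forward(batch)
        return out

    # e.g. time_inference(model, torch.randn(100, 128, 128))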