From 2b48e135130dcdc68732b4658ecae457a59f1147 Mon Sep 17 00:00:00 2001 From: Zingo Andersen Date: Tue, 4 Jun 2024 09:26:40 -0700 Subject: [PATCH] Ignore backends/arm unit tests from pytest.ini (#3831) Summary: The Arm tests need to have TOSA/Vela tools installed and they are not installed by default by installed_requirements.sh Most backends/arm tests will not test anything out of the box when running all tests without those tools installed. To run backends/arm tests you first need to install the Arm dependencies with: $ examples/arm/setup.sh --i-agree-to-the-contained-eula Then the tests can be executed with: $ pytest --config-file=/dev/null backends/arm/test/ For GitHub testing this is setup/executed in the unittest-arm job see .github/workflows/pull.yml for more info. Pull Request resolved: https://github.com/pytorch/executorch/pull/3831 Reviewed By: mergennachin Differential Revision: D58138417 Pulled By: digantdesai fbshipit-source-id: 8ff5fb8b902910d18bc86f69b8a0d4dc0930c555 --- backends/arm/test/common.py | 9 ----- backends/arm/test/misc/test_debug_feats.py | 23 +++++-------- .../arm/test/models/test_mobilenet_v2_arm.py | 26 +++----------- backends/arm/test/ops/test_add.py | 27 +++------------ backends/arm/test/ops/test_avg_pool.py | 22 +++--------- backends/arm/test/ops/test_batch_norm.py | 32 ++++------------- backends/arm/test/ops/test_clone.py | 24 +++---------- backends/arm/test/ops/test_conv.py | 22 +++--------- backends/arm/test/ops/test_conv_combos.py | 34 +++---------------- backends/arm/test/ops/test_depthwise_conv.py | 22 +++--------- backends/arm/test/ops/test_div.py | 22 +++--------- backends/arm/test/ops/test_linear.py | 22 +++--------- backends/arm/test/ops/test_mean_dim.py | 22 +++--------- backends/arm/test/ops/test_softmax.py | 22 +++--------- backends/arm/test/ops/test_view.py | 24 +++---------- pytest.ini | 5 +-- 16 files changed, 67 insertions(+), 291 deletions(-) diff --git a/backends/arm/test/common.py b/backends/arm/test/common.py 
index 2a6e10e161..8898643c0c 100644 --- a/backends/arm/test/common.py +++ b/backends/arm/test/common.py @@ -5,19 +5,10 @@ # LICENSE file in the root directory of this source tree. import os -import shutil import tempfile from executorch.backends.arm.arm_backend import ArmCompileSpecBuilder -# TODO: fixme! These globs are a temporary workaround. Reasoning: -# Running the jobs in _unittest.yml will not work since that environment doesn't -# have the vela tool, nor the tosa_reference_model tool. Hence, we need a way to -# run what we can in that env temporarily. Long term, vela and tosa_reference_model -# should be installed in the CI env. -TOSA_REF_MODEL_INSTALLED = shutil.which("tosa_reference_model") -VELA_INSTALLED = shutil.which("vela") - def get_tosa_compile_spec(permute_memory_to_nhwc=False, custom_path=None): """ diff --git a/backends/arm/test/misc/test_debug_feats.py b/backends/arm/test/misc/test_debug_feats.py index cc3a556363..4dda4fa110 100644 --- a/backends/arm/test/misc/test_debug_feats.py +++ b/backends/arm/test/misc/test_debug_feats.py @@ -110,18 +110,13 @@ def test_numerical_diff_prints(self): .partition() .to_executorch() ) - if common.TOSA_REF_MODEL_INSTALLED: - # We expect an assertion error here. Any other issues will cause the - # test to fail. Likewise the test will fail if the assertion error is - # not present. - try: - # Tolerate 0 difference => we want to trigger a numerical diff - tester.run_method_and_compare_outputs(atol=0, rtol=0, qtol=0) - except AssertionError: - pass # Implicit pass test - else: - self.fail() + # We expect an assertion error here. Any other issues will cause the + # test to fail. Likewise the test will fail if the assertion error is + # not present. 
+ try: + # Tolerate 0 difference => we want to trigger a numerical diff + tester.run_method_and_compare_outputs(atol=0, rtol=0, qtol=0) + except AssertionError: + pass # Implicit pass test else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) + self.fail() diff --git a/backends/arm/test/models/test_mobilenet_v2_arm.py b/backends/arm/test/models/test_mobilenet_v2_arm.py index 71b056c26e..fc92043c53 100644 --- a/backends/arm/test/models/test_mobilenet_v2_arm.py +++ b/backends/arm/test/models/test_mobilenet_v2_arm.py @@ -49,7 +49,7 @@ class TestMobileNetV2(unittest.TestCase): ) def test_mv2_tosa_MI(self): - tester = ( + ( ArmTester( self.mv2, inputs=self.model_inputs, @@ -60,16 +60,11 @@ def test_mv2_tosa_MI(self): .check(list(self.all_operators)) .partition() .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def test_mv2_tosa_BI(self): - tester = ( + ( ArmTester( self.mv2, inputs=self.model_inputs, @@ -81,23 +76,12 @@ def test_mv2_tosa_BI(self): .check(list(self.operators_after_quantization)) .partition() .to_executorch() - ) - if common.TOSA_REF_MODEL_INSTALLED: # atol=1.0 is a defensive upper limit # TODO MLETROCH-72 # TODO MLETROCH-149 - tester.run_method_and_compare_outputs( - atol=1.0, qtol=1, inputs=self.model_inputs - ) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) + .run_method_and_compare_outputs(atol=1.0, qtol=1, inputs=self.model_inputs) + ) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_mv2_u55_BI(self): ( ArmTester( diff --git a/backends/arm/test/ops/test_add.py b/backends/arm/test/ops/test_add.py index 53e4c2ef65..a54df79a52 100644 --- a/backends/arm/test/ops/test_add.py +++ 
b/backends/arm/test/ops/test_add.py @@ -58,7 +58,7 @@ def forward(self, x, y): def _test_add_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -71,18 +71,13 @@ def _test_add_tosa_MI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_add_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -96,15 +91,9 @@ def _test_add_tosa_BI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) - def _test_add_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): @@ -135,10 +124,6 @@ def test_add_tosa_BI(self, test_data: torch.Tensor): self._test_add_tosa_BI_pipeline(self.Add(), test_data) @parameterized.expand(Add.test_parameters) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_add_u55_BI(self, test_data: torch.Tensor): test_data = (test_data,) self._test_add_u55_BI_pipeline(self.Add(), test_data) @@ -154,10 +139,6 @@ def test_add2_tosa_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): self._test_add_tosa_BI_pipeline(self.Add2(), test_data) @parameterized.expand(Add2.test_parameters) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def 
test_add2_u55_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): test_data = (operand1, operand2) self._test_add_u55_BI_pipeline(self.Add2(), test_data) diff --git a/backends/arm/test/ops/test_avg_pool.py b/backends/arm/test/ops/test_avg_pool.py index 259e8e1180..b7634b3287 100644 --- a/backends/arm/test/ops/test_avg_pool.py +++ b/backends/arm/test/ops/test_avg_pool.py @@ -46,7 +46,7 @@ def forward(self, x): def _test_avgpool2d_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -60,18 +60,13 @@ def _test_avgpool2d_tosa_MI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_avgpool2d_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -86,13 +81,8 @@ def _test_avgpool2d_tosa_BI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_avgpool2d_tosa_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] @@ -142,10 +132,6 @@ def test_avgpool2d_tosa_BI( # Expected to fail since ArmQuantizer cannot quantize a AvgPool2D layer # TODO(MLETORCH-93) @parameterized.expand(test_data_suite) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if 
the Vela tool is not installed", - ) @unittest.expectedFailure def test_avgpool2d_tosa_u55_BI( self, diff --git a/backends/arm/test/ops/test_batch_norm.py b/backends/arm/test/ops/test_batch_norm.py index dceb2c576d..d2e57a6925 100644 --- a/backends/arm/test/ops/test_batch_norm.py +++ b/backends/arm/test/ops/test_batch_norm.py @@ -527,7 +527,7 @@ def forward(self, x): def _test_batchnorm2d_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -552,18 +552,13 @@ def _test_batchnorm2d_tosa_MI_pipeline( ] ) .to_executorch() + .run_method_and_compare_outputs(test_data) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(test_data) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_batchnorm2d_no_stats_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -586,18 +581,13 @@ def _test_batchnorm2d_no_stats_tosa_MI_pipeline( ] ) .to_executorch() + .run_method_and_compare_outputs(test_data) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(test_data) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_batchnorm2d_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -623,15 +613,9 @@ def _test_batchnorm2d_tosa_BI_pipeline( ] ) .to_executorch() + .run_method_and_compare_outputs(test_data) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(test_data) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) - def _test_batchnorm2d_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): @@ -724,10 +708,6 @@ def test_batchnorm2d_tosa_BI( @unittest.skip( 
reason="Expected to fail since ArmQuantizer cannot quantize a BatchNorm layer" ) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) @unittest.expectedFailure def test_batchnorm2d_u55_BI( self, diff --git a/backends/arm/test/ops/test_clone.py b/backends/arm/test/ops/test_clone.py index 2eb94a8232..a89d0a7e67 100644 --- a/backends/arm/test/ops/test_clone.py +++ b/backends/arm/test/ops/test_clone.py @@ -35,7 +35,7 @@ def forward(self, x: torch.Tensor): def _test_clone_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: torch.Tensor ): - tester = ( + ( ArmTester( module, inputs=test_data, compile_spec=common.get_tosa_compile_spec() ) @@ -45,19 +45,13 @@ def _test_clone_tosa_MI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) - def _test_clone_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, compile_spec=common.get_tosa_compile_spec() ) @@ -68,15 +62,9 @@ def _test_clone_tosa_BI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - raise RuntimeError( - "TOSA ref model tool not installed and the test is an expected fail" - ) - def _test_clone_tosa_u55_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): @@ -108,9 +96,5 @@ def test_clone_tosa_BI(self, test_tensor: torch.Tensor): # TODO MLETROCH-125 @parameterized.expand(Clone.test_parameters) @unittest.expectedFailure - @unittest.skipIf( - not 
common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_clone_u55_BI(self, test_tensor: torch.Tensor): self._test_clone_tosa_u55_pipeline(self.Clone(), (test_tensor,)) diff --git a/backends/arm/test/ops/test_conv.py b/backends/arm/test/ops/test_conv.py index 1f0ef7bc29..679aa7a5a6 100644 --- a/backends/arm/test/ops/test_conv.py +++ b/backends/arm/test/ops/test_conv.py @@ -244,7 +244,7 @@ class TestConv2D(unittest.TestCase): def _test_conv2d_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -256,20 +256,15 @@ def _test_conv2d_tosa_MI_pipeline( .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_conv2d_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor], ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -282,13 +277,8 @@ def _test_conv2d_tosa_BI_pipeline( .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_conv2d_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] @@ -317,9 +307,5 @@ def test_conv2d_tosa_BI(self, test_name, model): self._test_conv2d_tosa_BI_pipeline(model, model.get_inputs()) @parameterized.expand(testsuite_u55) - @unittest.skipIf( - not common.VELA_INSTALLED, - 
"There is no point in running U55 tests if the Vela tool is not installed", - ) def test_conv2d_u55_BI(self, test_name, model): self._test_conv2d_u55_BI_pipeline(model, model.get_inputs()) diff --git a/backends/arm/test/ops/test_conv_combos.py b/backends/arm/test/ops/test_conv_combos.py index 2bde068848..d206678422 100644 --- a/backends/arm/test/ops/test_conv_combos.py +++ b/backends/arm/test/ops/test_conv_combos.py @@ -157,7 +157,7 @@ class TestConvCombos(unittest.TestCase): def _test_conv_combo_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -169,13 +169,8 @@ def _test_conv_combo_tosa_MI_pipeline( .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(list(module.edge_op_list)) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_conv_combo_tosa_BI_pipeline( self, @@ -184,7 +179,7 @@ def _test_conv_combo_tosa_BI_pipeline( atol: float = 1e-3, rtol: float = 1e-3, ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -197,13 +192,8 @@ def _test_conv_combo_tosa_BI_pipeline( .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(list(module.edge_op_list)) .to_executorch() + .run_method_and_compare_outputs(atol=atol, rtol=rtol, qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(atol=atol, rtol=rtol, qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_conv_combo_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] @@ -234,10 +224,6 @@ def test_conv_meandim_tosa_BI(self): model = ComboConv2dMeandim() self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - @unittest.skipIf( - not 
common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_conv_meandim_u55_BI(self): model = ComboConv2dMeandim() self._test_conv_combo_u55_BI_pipeline(model, model.get_inputs()) @@ -253,10 +239,6 @@ def test_conv_batchnorm_relu_tosa_BI(self): model = ComboConvBatchnormRelu() self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_conv_batchnorm_relu_u55_BI(self): model = ComboConvBatchnormRelu() self._test_conv_combo_u55_BI_pipeline(model, model.get_inputs()) @@ -277,10 +259,6 @@ def test_conv_relu6_tosa_BI(self, test_data: torch.Tensor): self._test_conv_combo_tosa_BI_pipeline(model, test_data) @parameterized.expand(ComboConvRelu6.test_data) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_conv_relu6_u55_BI(self, test_data: torch.Tensor): model = ComboConvRelu6() test_data = (test_data,) @@ -297,10 +275,6 @@ def test_block_bottleneck_residual_tosa_BI(self): model = ComboBlockBottleneckResidual() self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_block_bottleneck_residual_u55_BI(self): model = ComboBlockBottleneckResidual() self._test_conv_combo_u55_BI_pipeline(model, model.get_inputs()) diff --git a/backends/arm/test/ops/test_depthwise_conv.py b/backends/arm/test/ops/test_depthwise_conv.py index 0901a49293..bc613d1d18 100644 --- a/backends/arm/test/ops/test_depthwise_conv.py +++ b/backends/arm/test/ops/test_depthwise_conv.py @@ -130,7 +130,7 @@ class TestDepthwiseConv2D(unittest.TestCase): def _test_dw_conv2d_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( 
module, inputs=test_data, @@ -142,18 +142,13 @@ def _test_dw_conv2d_tosa_MI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_dw_conv2d_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -166,13 +161,8 @@ def _test_dw_conv2d_tosa_BI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_dw_conv2d_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] @@ -201,9 +191,5 @@ def test_dw_conv2d_tosa_BI(self, test_name, model): self._test_dw_conv2d_tosa_BI_pipeline(model, model.get_inputs()) @parameterized.expand(testsuite_u55) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_dw_conv2d_u55_BI(self, test_name, model): self._test_dw_conv2d_u55_BI_pipeline(model, model.get_inputs()) diff --git a/backends/arm/test/ops/test_div.py b/backends/arm/test/ops/test_div.py index 5152a71c2a..f8154c5138 100644 --- a/backends/arm/test/ops/test_div.py +++ b/backends/arm/test/ops/test_div.py @@ -104,7 +104,7 @@ def forward( def _test_div_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -117,18 +117,13 
@@ def _test_div_tosa_MI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(test_data) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(test_data) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_div_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -142,13 +137,8 @@ def _test_div_tosa_BI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(test_data) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(test_data) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_div_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] @@ -202,10 +192,6 @@ def test_div_tosa_BI( # Expected to fail since ArmQuantizer cannot quantize a Div layer # TODO(MLETORCH-129) @parameterized.expand(test_data_suite) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) @unittest.expectedFailure def test_div_u55_BI( self, diff --git a/backends/arm/test/ops/test_linear.py b/backends/arm/test/ops/test_linear.py index bafadb6df5..57be8ddb92 100644 --- a/backends/arm/test/ops/test_linear.py +++ b/backends/arm/test/ops/test_linear.py @@ -116,7 +116,7 @@ def forward(self, x): def _test_linear_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -129,18 +129,13 @@ def _test_linear_tosa_MI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - 
tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_linear_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -154,13 +149,8 @@ def _test_linear_tosa_BI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=True) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=True) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_linear_tosa_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] @@ -212,10 +202,6 @@ def test_linear_tosa_BI( ) @parameterized.expand(test_data_suite_rank1) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_linear_tosa_u55_BI( self, test_name: str, diff --git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py index 79ba3de7da..d6de90f4be 100644 --- a/backends/arm/test/ops/test_mean_dim.py +++ b/backends/arm/test/ops/test_mean_dim.py @@ -51,7 +51,7 @@ def forward(self, x): def _test_meandim_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -65,18 +65,13 @@ def _test_meandim_tosa_MI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_mean_dim"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_meandim_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: 
Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -91,13 +86,8 @@ def _test_meandim_tosa_BI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten_mean_dim"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_meandim_tosa_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] @@ -136,10 +126,6 @@ def test_meandim_tosa_BI( self._test_meandim_tosa_BI_pipeline(self.MeanDim(), (test_data,)) @parameterized.expand(test_data_suite) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_meandim_tosa_u55_BI( self, test_name: str, diff --git a/backends/arm/test/ops/test_softmax.py b/backends/arm/test/ops/test_softmax.py index 32bd225346..237e061567 100644 --- a/backends/arm/test/ops/test_softmax.py +++ b/backends/arm/test/ops/test_softmax.py @@ -39,7 +39,7 @@ def forward(self, x): def _test_softmax_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, @@ -53,18 +53,13 @@ def _test_softmax_tosa_MI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten__softmax_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs() ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs() - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_softmax_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, compile_spec=common.get_tosa_compile_spec() ) 
@@ -77,13 +72,8 @@ def _test_softmax_tosa_BI_pipeline( .check_not(["executorch_exir_dialects_edge__ops_aten__softmax_default"]) .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) def _test_softmax_tosa_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.tensor] @@ -129,10 +119,6 @@ def test_softmax_tosa_BI( # Expected to fail since ArmQuantizer cannot quantize a SoftMax layer # TODO(MLETORCH-92) @parameterized.expand(test_data_suite) - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) @unittest.expectedFailure def test_softmax_tosa_u55_BI( self, diff --git a/backends/arm/test/ops/test_view.py b/backends/arm/test/ops/test_view.py index fddd21ed2f..c2dc3b87bd 100644 --- a/backends/arm/test/ops/test_view.py +++ b/backends/arm/test/ops/test_view.py @@ -32,7 +32,7 @@ def forward(self, x: torch.Tensor): def _test_view_tosa_MI_pipeline( self, module: torch.nn.Module, test_data: torch.Tensor ): - tester = ( + ( ArmTester( module, inputs=test_data, compile_spec=common.get_tosa_compile_spec() ) @@ -42,19 +42,13 @@ def _test_view_tosa_MI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - logger.warning( - "TOSA ref model tool not installed, skip numerical correctness tests" - ) - def _test_view_tosa_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): - tester = ( + ( ArmTester( module, inputs=test_data, compile_spec=common.get_tosa_compile_spec() ) @@ -65,15 +59,9 @@ def 
_test_view_tosa_BI_pipeline( .partition() .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() + .run_method_and_compare_outputs(qtol=1) ) - if common.TOSA_REF_MODEL_INSTALLED: - tester.run_method_and_compare_outputs(qtol=1) - else: - raise RuntimeError( - "TOSA ref model tool not installed and the test is an expected fail" - ) - def _test_view_u55_BI_pipeline( self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] ): @@ -105,9 +93,5 @@ def test_view_tosa_BI(self, test_tensor: torch.Tensor): # TODO MLETROCH-125 @parameterized.expand(View.test_parameters) @unittest.expectedFailure - @unittest.skipIf( - not common.VELA_INSTALLED, - "There is no point in running U55 tests if the Vela tool is not installed", - ) def test_view_u55_BI(self, test_tensor: torch.Tensor): self._test_view_u55_BI_pipeline(self.View(), (test_tensor,)) diff --git a/pytest.ini b/pytest.ini index 32ad382601..8b4e756cee 100644 --- a/pytest.ini +++ b/pytest.ini @@ -8,6 +8,9 @@ addopts = --capture=sys # don't suppress warnings, but don't shove them all to the end either -p no:warnings + # Ignore backends/arm tests you need to run examples/arm/setup.sh to install some tool to make them work + # For GitHub testing this is setup/executed in the unittest-arm job see .github/workflows/pull.yml for more info. + --ignore-glob=backends/arm/**/* # explicitly list out tests that are running successfully in oss examples/portable/test # sdk/ @@ -37,8 +40,6 @@ addopts = # kernels/ kernels/prim_ops/test kernels/test/test_case_gen.py - # backends/arm - backends/arm/test # backends/xnnpack backends/xnnpack/test/models # test