diff --git a/backends/xnnpack/partition/config/generic_node_configs.py b/backends/xnnpack/partition/config/generic_node_configs.py
index b95d7c5b89..f08b8ccb3c 100644
--- a/backends/xnnpack/partition/config/generic_node_configs.py
+++ b/backends/xnnpack/partition/config/generic_node_configs.py
@@ -303,6 +303,21 @@ def get_original_aten(self) -> Optional[torch._ops.OpOverload]:
 class UpsampleBilinear2dConfig(GenericNodePartitionerConfig):
     target_name = "upsample_bilinear2d.vec"
 
+    def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
+        """
+        XNNPACK's static_resize_bilinear does not support dynamic output sizes.
+        """
+        if not self.check_common_constraints(node, ep):
+            return False
+
+        is_output_dynamic = "val" in node.meta and any(
+            isinstance(d, torch.SymInt) for d in node.meta["val"].shape
+        )
+        if is_output_dynamic:
+            why(node, reason="dynamic output sizes are not supported")
+            return False
+        return True
+
     def supported_precision_types(self) -> List[ConfigPrecisionType]:
         return [ConfigPrecisionType.FP32]
 
diff --git a/backends/xnnpack/test/ops/bilinear2d.py b/backends/xnnpack/test/ops/bilinear2d.py
index bf89e2196f..6a19476365 100644
--- a/backends/xnnpack/test/ops/bilinear2d.py
+++ b/backends/xnnpack/test/ops/bilinear2d.py
@@ -8,6 +8,7 @@
 import torch
 
 from executorch.backends.xnnpack.test.tester import Tester
+from executorch.backends.xnnpack.test.tester.tester import Export
 
 
 class TestUpsampleBilinear2d(unittest.TestCase):
@@ -118,3 +119,23 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
             )
             .check_not(["torch.ops.higher_order.executorch_call_delegate"])
         )
+
+    def test_fp32_bilinear2d_dynamic_not_partitioned(self):
+        """
+        Verify that upsample_bilinear2d ops with dynamic output sizes are not partitioned.
+        """
+        example_inputs = (torch.randn(2, 3, 4, 5),)
+        dynamic_shapes = {
+            "x": {
+                2: torch.export.Dim("h", min=1, max=10),
+                3: torch.export.Dim("w", min=1, max=12),
+            }
+        }
+        (
+            Tester(self.StaticResizeBilinear2dModule(), example_inputs)
+            .export(Export(dynamic_shapes))
+            .to_edge_transform_and_lower()
+            # NOTE: The decomposition is partially delegated. This will need to be
+            # replaced with the aten upsample op once the decomp is removed.
+            .check("executorch_exir_dialects_edge__ops_aten_index_Tensor")
+        )
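
Reviewer note: below is a minimal standalone sketch (not part of the patch) of how dynamic output dimensions surface as torch.SymInt in node.meta["val"], which is what the new check_constraints inspects. The Upsample module, the dim names, and the print loop are illustrative assumptions, not code from this change.

import torch
from torch.export import Dim, export


class Upsample(torch.nn.Module):
    # Hypothetical stand-in for the test's StaticResizeBilinear2dModule.
    def forward(self, x):
        return torch.nn.functional.interpolate(
            x, scale_factor=2.0, mode="bilinear", align_corners=False
        )


example_inputs = (torch.randn(2, 3, 4, 5),)
dynamic_shapes = {
    "x": {
        2: Dim("h", min=1, max=10),
        3: Dim("w", min=1, max=12),
    }
}
ep = export(Upsample(), example_inputs, dynamic_shapes=dynamic_shapes)

for node in ep.graph.nodes:
    val = node.meta.get("val")
    if val is not None and hasattr(val, "shape"):
        # Any torch.SymInt in the output shape marks the node as dynamically
        # sized; this mirrors the is_output_dynamic check added in
        # UpsampleBilinear2dConfig.check_constraints.
        print(node.target, any(isinstance(d, torch.SymInt) for d in val.shape))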