Fix XNN partitioning dynamic upsample with constant scales
Differential Revision: D66611750

Pull Request resolved: pytorch#7131
GregoryComer authored Dec 3, 2024
1 parent 5f0a14a commit 0dc2bd3
Showing 2 changed files with 36 additions and 0 deletions.
backends/xnnpack/partition/config/generic_node_configs.py (+15, -0)
@@ -303,6 +303,21 @@ def get_original_aten(self) -> Optional[torch._ops.OpOverload]:
class UpsampleBilinear2dConfig(GenericNodePartitionerConfig):
    target_name = "upsample_bilinear2d.vec"

    def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
        """
        XNNPACK's static_resize_bilinear does not support dynamic output sizes
        """
        if not self.check_common_constraints(node, ep):
            return False

        is_output_dynamic = "val" in node.meta and any(
            isinstance(d, torch.SymInt) for d in node.meta["val"].shape
        )
        if is_output_dynamic:
            why(node, reason="dynamic output sizes are not supported")
            return False
        return True

    def supported_precision_types(self) -> List[ConfigPrecisionType]:
        return [ConfigPrecisionType.FP32]

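For context, here is a minimal sketch of what the new SymInt check keys on (not part of the commit; the module and dim names are illustrative): exporting with dynamic shapes records fake tensor values in node.meta["val"] whose shapes contain torch.SymInt entries for the dynamic dimensions, even when the scale factors themselves are constant.

import torch

class Interp(torch.nn.Module):
    def forward(self, x):
        # Constant scale factor; the output height/width still become
        # symbolic when the input height/width are dynamic.
        return torch.nn.functional.interpolate(x, scale_factor=2.0, mode="bilinear")

ep = torch.export.export(
    Interp(),
    (torch.randn(1, 3, 4, 4),),
    dynamic_shapes={"x": {2: torch.export.Dim("h", min=2, max=16)}},
)
for node in ep.graph.nodes:
    val = node.meta.get("val")
    if hasattr(val, "shape"):
        # Dynamic dims show up as SymInt; static dims as plain int.
        print(node.name, [type(d).__name__ for d in val.shape])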
backends/xnnpack/test/ops/bilinear2d.py (+21, -0)
@@ -8,6 +8,7 @@

import torch
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Export


class TestUpsampleBilinear2d(unittest.TestCase):
@@ -118,3 +119,23 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
            )
            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
        )

    def test_fp32_bilinear2d_dynamic_not_partitioned(self):
"""
Verify that upsample_bilinear2d ops with dynamic output sizes are not partitioned.
"""
example_inputs = (torch.randn(2, 3, 4, 5),)
dynamic_shapes = {
"x": {
2: torch.export.Dim("h", min=1, max=10),
3: torch.export.Dim("w", min=1, max=12),
}
}
(
Tester(self.StaticResizeBilinear2dModule(), example_inputs)
.export(Export(dynamic_shapes))
.to_edge_transform_and_lower()
# NOTE The decomposition is partially delegated. This will need to be replaced
# with the aten upsample op once decomp is removed.
.check("executorch_exir_dialects_edge__ops_aten_index_Tensor")
)
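For readers without the test harness, roughly the same behavior can be reproduced directly against the partitioner. This is a hedged sketch, not part of the commit; it assumes the XnnpackPartitioner and to_edge_transform_and_lower import paths below match the source tree.

import torch
from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
from executorch.exir import to_edge_transform_and_lower

class Upsample(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.interpolate(x, scale_factor=2.0, mode="bilinear")

ep = torch.export.export(
    Upsample(),
    (torch.randn(2, 3, 4, 5),),
    dynamic_shapes={"x": {2: torch.export.Dim("h", min=1, max=10)}},
)
edge = to_edge_transform_and_lower(ep, partitioner=[XnnpackPartitioner()])
# With this fix, the dynamically shaped upsample (and its index-based
# decomposition) should remain in the edge graph rather than being
# claimed by the XNNPACK delegate.
print(edge.exported_program().graph_module.code)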
