diff --git a/backends/vulkan/partitioner/supported_ops.py b/backends/vulkan/partitioner/supported_ops.py
index 08d7f96a6b..ca7ce72cae 100644
--- a/backends/vulkan/partitioner/supported_ops.py
+++ b/backends/vulkan/partitioner/supported_ops.py
@@ -8,7 +8,10 @@
 
 import operator
 
-from executorch.backends.vulkan.passes.custom_ops_defs import grid_priors_op  # noqa
+from executorch.backends.vulkan.passes.custom_ops_defs import (  # noqa
+    conv_with_clamp_op,
+    grid_priors_op,
+)
 
 from executorch.exir.dialects._ops import ops as exir_ops
 
@@ -84,6 +87,7 @@ def __contains__(self, op):
 
 CONVOLUTION_OPS = [
     exir_ops.edge.aten.convolution.default,
+    exir_ops.edge.et_vk.conv_with_clamp.default,
 ]
 
 REDUCTION_OPS = [
diff --git a/backends/vulkan/passes/custom_ops_defs.py b/backends/vulkan/passes/custom_ops_defs.py
index 62f21bfee6..fd586b665a 100644
--- a/backends/vulkan/passes/custom_ops_defs.py
+++ b/backends/vulkan/passes/custom_ops_defs.py
@@ -48,6 +48,43 @@ def conv_with_clamp_impl(
 conv_with_clamp_op = getattr(getattr(torch.ops, namespace), name)
 
 
+def conv_with_clamp_out_impl(
+    input,
+    weight,
+    bias=None,
+    stride=1,
+    padding=0,
+    dilation=1,
+    transposed=False,
+    output_padding=0,
+    groups=1,
+    output_min=-float("inf"),
+    output_max=float("inf"),
+    out=None,
+):
+    out = conv_with_clamp_impl(
+        input,
+        weight,
+        bias,
+        stride,
+        padding,
+        dilation,
+        transposed,
+        output_padding,
+        groups,
+        output_min,
+        output_max,
+    )
+    return out
+
+
+name = "conv_with_clamp.out"
+lib.define(
+    f"{name}(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, Scalar? output_min, Scalar? output_max, *, Tensor(a!) out) -> Tensor(a!)"
+)
+lib.impl(name, conv_with_clamp_out_impl, "CompositeExplicitAutograd")
+
+
 # The dimension of x should be larger than 1
 def grid_priors_impl(
     x,
diff --git a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp
index 52af0542b6..74113197d4 100644
--- a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp
+++ b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp
@@ -562,6 +562,7 @@ void conv(ComputeGraph& graph, const std::vector<ValueRef>& args) {
 REGISTER_OPERATORS {
   VK_REGISTER_OP(aten.convolution.default, conv);
   VK_REGISTER_OP(conv_with_clamp.default, conv);
+  VK_REGISTER_OP(et_vk.conv_with_clamp.default, conv);
 }
 
 } // namespace vkcompute
diff --git a/backends/vulkan/test/test_vulkan_delegate.py b/backends/vulkan/test/test_vulkan_delegate.py
index 9f57ec49a8..d80809ec79 100644
--- a/backends/vulkan/test/test_vulkan_delegate.py
+++ b/backends/vulkan/test/test_vulkan_delegate.py
@@ -1633,6 +1633,42 @@ def forward(self, x):
             memory_layouts=[vk_graph_schema.VkMemoryLayout.TENSOR_CHANNELS_PACKED],
         )
 
+    def test_vulkan_backend_conv_with_clamp(self):
+        class ConvWithClampModule(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.weight = torch.randn(6, 8, 3, 3)
+                self.bias = torch.randn(8)
+                self.stride = (1, 2)
+                self.padding = (2, 3)
+                self.dilation = (1, 1)
+                self.transposed = True
+                self.output_padding = (0, 1)
+                self.groups = 1
+                self.output_min = 0
+                self.output_max = 10
+
+            def forward(self, x):
+                return torch.ops.et_vk.conv_with_clamp(
+                    x,
+                    self.weight,
+                    self.bias,
+                    self.stride,
+                    self.padding,
+                    self.dilation,
+                    self.transposed,
+                    self.output_padding,
+                    self.groups,
+                    self.output_min,
+                    self.output_max,
+                )
+
+        self.lower_module_and_test_output(
+            ConvWithClampModule(),
+            (torch.randn(size=(1, 6, 40, 50), dtype=torch.float32),),
+            memory_layouts=[vk_graph_schema.VkMemoryLayout.TENSOR_CHANNELS_PACKED],
+        )
+
     def test_vulkan_backend_grid_priors(self):
         class GridPriorsModule(torch.nn.Module):
             def __init__(self):