From 532b10c08f938048191681c832365eef6428c07e Mon Sep 17 00:00:00 2001
From: "wenyuchi.wyc"
Date: Tue, 22 Nov 2022 11:33:45 +0800
Subject: [PATCH] Support fusing Add into ConvTranspose.

Signed-off-by: wenyuchi.wyc
---
 .../passes/fuse_add_bias_into_conv.h |  20 +-
 onnxoptimizer/test/optimizer_test.py | 202 ++++++++++++++++++
 2 files changed, 218 insertions(+), 4 deletions(-)

diff --git a/onnxoptimizer/passes/fuse_add_bias_into_conv.h b/onnxoptimizer/passes/fuse_add_bias_into_conv.h
index b4696b5f7..3d2fc4fc0 100644
--- a/onnxoptimizer/passes/fuse_add_bias_into_conv.h
+++ b/onnxoptimizer/passes/fuse_add_bias_into_conv.h
@@ -33,10 +33,21 @@ struct FuseAddBiasIntoConv final : public PredicateBasedPass {
   std::string getPassName() const override {
     return "fuse_add_bias_into_conv";
   }
+
+  inline bool matchConvAdd(Node *node) {
+    return node->kind() == kAdd && node->inputs()[0]->node()->kind() == kConv &&
+           node->inputs()[0]->node()->inputs().size() == 2;
+  }
+
+  inline bool matchConvTransposeAdd(Node *node) {
+    return node->kind() == kAdd && node->inputs()[0]->node()->kind() == kConvTranspose &&
+           node->inputs()[0]->node()->inputs().size() == 2;
+  }
+
   bool patternMatchPredicate(Node *node) override {
-    return CheckKind(node, kAdd, 0, kConv) &&
-           GetInputsOfPreNode(node, 0).size() == 2;
+    return matchConvAdd(node) || matchConvTransposeAdd(node);
   }
+
   static Node *makeSqueezeOrUnsqueeze(Graph &graph, std::vector<int64_t> &axes,
                                       Value *input, Node *target_node,
                                       BuiltinSymbol k) {
@@ -62,6 +73,7 @@ struct FuseAddBiasIntoConv final : public PredicateBasedPass {
                    NodeDestroyType &destroy_current) override {
     // due to current broadcasting's constraint, Conv has to be the first
     // operand
+    const bool is_conv = matchConvAdd(n);
     destroy_current = NodeDestroyType::DestroyZero;
     auto orig_conv = n->inputs()[0];
     auto orig_bias = n->inputs()[1];
@@ -86,8 +98,8 @@ struct FuseAddBiasIntoConv final : public PredicateBasedPass {
     }
     // try to get feature M and rank from weight_shape
     if (weight_shape.size() > 0 && weight_shape[0].is_int) {
-      ONNX_ASSERT(M == -1 || M == weight_shape[0].dim);
-      M = weight_shape[0].dim;
+      ONNX_ASSERT(M == -1 || M == weight_shape[0].dim || M == weight_shape[1].dim);
+      M = is_conv ? weight_shape[0].dim : weight_shape[1].dim;
       ONNX_ASSERT(rank == -1 ||
                   rank == static_cast<int64_t>(weight_shape.size()));
       rank = weight_shape.size();
diff --git a/onnxoptimizer/test/optimizer_test.py b/onnxoptimizer/test/optimizer_test.py
index ad7721800..0c7aebba2 100644
--- a/onnxoptimizer/test/optimizer_test.py
+++ b/onnxoptimizer/test/optimizer_test.py
@@ -1424,6 +1424,208 @@ def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self):
         assert optimized_model.graph.node[0].op_type == "Conv"
         assert optimized_model.graph.node[1].op_type == "Add"
 
+    def test_fuse_add_bias_into_conv_transpose_with_scalar_bias(self):  # type: () -> None
+        nodes = [
+            helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2)),
+            helper.make_node("Add", ["Z", "A"], ["B"]),
+        ]
+        graph = helper.make_graph(
+            nodes,
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, ()),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        # Unsqueeze, Constant, Tile, ConvTranspose
+        assert len(optimized_model.graph.node) == 4
+        assert optimized_model.graph.node[0].op_type == "Unsqueeze"
+        assert optimized_model.graph.node[1].op_type == "Constant"
+        assert optimized_model.graph.node[2].op_type == "Tile"
+        assert optimized_model.graph.node[3].op_type == "ConvTranspose"
+
+    def test_fuse_add_bias_into_conv_transpose_use_weight_shape(self):  # type: () -> None
+        nodes = [
+            helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2)),
+            helper.make_node("Add", ["Z", "A"], ["B"]),
+        ]
+        # FIXME(daquexian): It looks like subgraph cannot get value info from parent subgraph
+        # nodes.extend(self._make_fake_loop_op(
+        #     [helper.make_node("Conv", ["_X", "Y"], ["_Z"]),
+        #      helper.make_node("Add", ["_Z", "A"], ["_B2"])],
+        #     [(TensorProto.FLOAT, (1, 5, 3, 3), "X")],
+        #     [(TensorProto.FLOAT, (1, 16, 1, 1), "B2")]))
+        graph = helper.make_graph(
+            nodes,
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        # # Squeeze, Conv, Constant (trip count), Constant (condition), Loop
+        # assert len(list(optimized_model.graph.node)) == 5
+        assert len(list(optimized_model.graph.node)) == 2
+        assert optimized_model.graph.node[0].op_type == "Squeeze"
+        assert optimized_model.graph.node[1].op_type == "ConvTranspose"
+        assert optimized_model.graph.output[0].name == "B"
+        # # Squeeze, Conv
+        # assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
+        # assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
+        # assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
+        # # Output 1 since 0 is 'cond'
+        # assert optimized_model.graph.node[4].attribute[0].g.output[1].name == 'B2'
+
+    # type: () -> None
+    def test_fuse_add_bias_into_conv_transpose_use_weight_shape_with_tile(self):
+        conv = helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2))
+        add = helper.make_node("Add", ["Z", "A"], ["B"])
+        graph = helper.make_graph(
+            [conv, add],
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        assert len(list(optimized_model.graph.node)) == 3
+        assert len(optimized_model.graph.value_info) == 1
+        assert (
+            optimized_model.graph.value_info[0].type.tensor_type.elem_type
+            == TensorProto.INT64
+        )
+        assert len(optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
+        assert optimized_model.graph.node[0].op_type == "Constant"
+        assert optimized_model.graph.node[1].op_type == "Tile"
+        assert optimized_model.graph.node[2].op_type == "ConvTranspose"
+        assert optimized_model.graph.output[0].name == "B"
+
+    def test_fuse_add_bias_into_conv_transpose_use_conv_shape(self):  # type: () -> None
+        sub = helper.make_node("Sub", ["M", "N"], ["Y"])
+        conv = helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2))
+        add = helper.make_node("Add", ["Z", "A"], ["B"])
+        graph = helper.make_graph(
+            [sub, conv, add],
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("M", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("N", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+            value_info=[
+                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 320, 320))
+            ],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        assert len(optimized_model.graph.node) == 3
+        assert optimized_model.graph.node[0].op_type == "Sub"
+        assert optimized_model.graph.node[1].op_type == "Squeeze"
+        assert optimized_model.graph.node[2].op_type == "ConvTranspose"
+        assert optimized_model.graph.output[0].name == "B"
+        assert (
+            optimized_model.graph.output[0].type.tensor_type.elem_type
+            == TensorProto.FLOAT
+        )
+        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
+
+    # type: () -> None
+    def test_fuse_add_bias_into_conv_transpose_use_move_constant(self):
+        conv = helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2))
+        constant = helper.make_node(
+            "Constant",
+            [],
+            ["A"],
+            value=helper.make_tensor(
+                name="bias",
+                data_type=TensorProto.FLOAT,
+                dims=(16, 1, 1),
+                vals=np.random.randn(16).astype(np.float32).tolist(),
+            ),
+        )
+        add = helper.make_node("Add", ["Z", "A"], ["B"])
+        graph = helper.make_graph(
+            [conv, constant, add],
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+            value_info=[
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1)),
+            ],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        assert len(optimized_model.graph.node) == 3
+        assert optimized_model.graph.node[0].op_type == "Constant"
+        assert optimized_model.graph.node[1].op_type == "Squeeze"
+        assert optimized_model.graph.node[2].op_type == "ConvTranspose"
+        assert optimized_model.graph.output[0].name == "B"
+        assert (
+            optimized_model.graph.output[0].type.tensor_type.elem_type
+            == TensorProto.FLOAT
+        )
+        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
+
+    # type: () -> None
+    def test_fuse_add_bias_into_conv_transpose_squeeze_1d_bias_no_fuse(self):
+        conv = helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2))
+        add = helper.make_node("Add", ["Z", "A"], ["B"])
+        graph = helper.make_graph(
+            [conv, add],
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (320,)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+            value_info=[
+                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 320, 320)),
+            ],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        assert len(list(optimized_model.graph.node)) == 2
+        assert optimized_model.graph.node[0].op_type == "ConvTranspose"
+        assert optimized_model.graph.node[1].op_type == "Add"
+
+    # type: () -> None
+    def test_fuse_add_bias_into_conv_transpose_squeeze_4d_bias_no_fuse(self):
+        conv = helper.make_node("ConvTranspose", ["X", "Y"], ["Z"], strides=(2, 2))
+        add = helper.make_node("Add", ["Z", "A"], ["B"])
+        graph = helper.make_graph(
+            [conv, add],
+            "test",
+            [
+                helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 3, 160, 160)),
+                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 16, 2, 2)),
+                helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 320, 320)),
+            ],
+            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 320, 320))],
+        )
+        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
+
+        assert len(list(optimized_model.graph.node)) == 2
+        assert optimized_model.graph.node[0].op_type == "ConvTranspose"
+        assert optimized_model.graph.node[1].op_type == "Add"
+
     def test_fuse_matmul_add_bias_into_gemm(self):  # type: () -> None
         matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
         add = helper.make_node("Add", ["Z", "B"], ["A"])