From 5dd9da19ffcaa6fe8535e6fa5dc84da4f4f4742a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A5ns=20Nilsson?= Date: Tue, 12 Nov 2024 11:17:12 +0100 Subject: [PATCH] Fix quant specific op registration for some ops Quant specific registration for BatchMatmul, SVDF and LSTM were not working correctly. Change-Id: I491ee918f34ed3c440073bbfb18ad4cf9d855e63 --- tensorflow/lite/micro/kernels/BUILD | 2 + tensorflow/lite/micro/kernels/batch_matmul.cc | 2 + tensorflow/lite/micro/kernels/batch_matmul.h | 97 ++------------ .../lite/micro/kernels/batch_matmul_common.cc | 119 ++++++++++++++++++ .../lite/micro/micro_mutable_op_resolver.h | 12 +- tensorflow/lite/micro/tools/make/Makefile | 1 + 6 files changed, 141 insertions(+), 92 deletions(-) create mode 100644 tensorflow/lite/micro/kernels/batch_matmul_common.cc diff --git a/tensorflow/lite/micro/kernels/BUILD b/tensorflow/lite/micro/kernels/BUILD index 29a369eda33..5bf750a0970 100644 --- a/tensorflow/lite/micro/kernels/BUILD +++ b/tensorflow/lite/micro/kernels/BUILD @@ -202,6 +202,7 @@ tflm_kernel_cc_library( "arg_min_max.cc", "assign_variable.cc", "batch_matmul.cc", + "batch_matmul_common.cc", "batch_to_space_nd.cc", "broadcast_args.cc", "broadcast_to.cc", @@ -326,6 +327,7 @@ tflm_kernel_cc_library( "sub.h", "svdf.h", "transpose_conv.h", + "unidirectional_sequence_lstm.h", ] + select({ xtensa_fusion_f1_config(): glob(["xtensa/**/*.h"]), xtensa_hifi_3_config(): glob(["xtensa/**/*.h"]), diff --git a/tensorflow/lite/micro/kernels/batch_matmul.cc b/tensorflow/lite/micro/kernels/batch_matmul.cc index 15112e3b4cd..bbb1c0b0a7e 100644 --- a/tensorflow/lite/micro/kernels/batch_matmul.cc +++ b/tensorflow/lite/micro/kernels/batch_matmul.cc @@ -24,7 +24,9 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/reference/transpose.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/batch_matmul.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" #include "tensorflow/lite/micro/micro_log.h" namespace tflite { diff --git a/tensorflow/lite/micro/kernels/batch_matmul.h b/tensorflow/lite/micro/kernels/batch_matmul.h index 198b1d48ead..5e811fa3782 100644 --- a/tensorflow/lite/micro/kernels/batch_matmul.h +++ b/tensorflow/lite/micro/kernels/batch_matmul.h @@ -16,22 +16,12 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_KERNELS_BATCH_MATMUL_H_ #define TENSORFLOW_LITE_MICRO_KERNELS_BATCH_MATMUL_H_ -#include <cstdint> - #include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/internal/reference/transpose.h" #include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" #include "tensorflow/lite/micro/micro_common.h" -#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -extern constexpr int kBatchMatmulInputLhsTensor = 0; -extern constexpr int kBatchMatmulInputRhsTensor = 1; -extern constexpr int kBatchMatmulOutputTensor = 0; - struct QuantizationOpDataBatchMatmul { // The scaling factor from input to output (aka the 'real multiplier') can // be represented as a fixed point multiplier plus a left shift. 
@@ -59,98 +49,29 @@ struct OpDataBatchMatmul { bool rhs_is_constant_tensor; }; +extern const int kBatchMatmulInputLhsTensor; +extern const int kBatchMatmulInputRhsTensor; +extern const int kBatchMatmulOutputTensor; + TfLiteStatus ReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node, const RuntimeShape& extended_lhs_shape, const RuntimeShape& extended_rhs_shape, bool adj_x, bool adj_y, int output_rank, - TfLiteTensor* output) { - int64_t orig_size = NumElements(output); - - // make sure the new output dims rank does not exceed the original rank - TF_LITE_ENSURE(context, output_rank <= NumDimensions(output)); - - // make sure output tensor dims are not in the FlatBuffer - TfLiteEvalTensor* output_eval = - tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); - TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( - context, output, output_eval)); - - // Fill in any broadcast dimensions. - for (int i = 0; i < output_rank - 2; ++i) { - const int lhs_dim = extended_lhs_shape.Dims(i); - const int rhs_dim = extended_rhs_shape.Dims(i); - int broadcast_dim = lhs_dim; - if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { - broadcast_dim = rhs_dim; - } - output->dims->data[i] = broadcast_dim; - } - // Fill in the matmul dimensions. - int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; - int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1; - - output->dims->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); - output->dims->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); - output->dims->size = output_rank; - - // Check that output tensor has not been resized - // since TFLM doesn't support tensor resizing. 
- TF_LITE_ENSURE_EQ(context, orig_size, NumElements(output)); - - return kTfLiteOk; -} + TfLiteTensor* output); template <typename T> void TransposeRowsColumnsImpl(const TfLiteEvalTensor& tensor_in, - TfLiteEvalTensor* tensor_out) { - const T* input = tflite::micro::GetTensorData<T>(&tensor_in); - T* output = tflite::micro::GetTensorData<T>(tensor_out); - RuntimeShape transposed_shape(tflite::micro::GetTensorShape(&tensor_in)); - RuntimeShape shape(transposed_shape); - TransposeParams params; - const int rank = shape.DimensionsCount(); - params.perm_count = rank; - for (int i = 0; i < rank - 2; ++i) { - params.perm[i] = i; - } - // Transpose the last two dimensions. - params.perm[rank - 2] = rank - 1; - params.perm[rank - 1] = rank - 2; - transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); - transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); - reference_ops::Transpose(params, shape, input, transposed_shape, output); -} + TfLiteEvalTensor* tensor_out); TfLiteStatus TransposeRowsColumns(const TfLiteEvalTensor& tensor_in, - TfLiteEvalTensor* tensor_out) { - if (tensor_in.type == kTfLiteFloat32) { - TransposeRowsColumnsImpl<float>(tensor_in, tensor_out); - return kTfLiteOk; - } else if (tensor_in.type == kTfLiteInt8) { - TransposeRowsColumnsImpl<int8_t>(tensor_in, tensor_out); - return kTfLiteOk; - } else if (tensor_in.type == kTfLiteInt16) { - TransposeRowsColumnsImpl<int16_t>(tensor_in, tensor_out); - return kTfLiteOk; - } else { - MicroPrintf( - "BATCH_MATMUL can only transpose tensors with FLOAT32, INT8, INT16 " - "type."); - } - return kTfLiteError; -} + TfLiteEvalTensor* tensor_out); -RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { - RuntimeShape swapped_shape(shape); - const int32_t dims = shape.DimensionsCount(); - swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); - swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); - return swapped_shape; -} +RuntimeShape SwapRowColumnDims(const RuntimeShape& shape); TFLMRegistration Register_BATCH_MATMUL(); #if defined(CMSIS_NN) + // 
Returns a TFLMRegistration struct for kernel variant that only supports // int8 matrix multiplication and uses the latency optimized // implementations. diff --git a/tensorflow/lite/micro/kernels/batch_matmul_common.cc b/tensorflow/lite/micro/kernels/batch_matmul_common.cc new file mode 100644 index 00000000000..1447cd489e9 --- /dev/null +++ b/tensorflow/lite/micro/kernels/batch_matmul_common.cc @@ -0,0 +1,119 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include <cstdint> + +#include "tensorflow/lite/kernels/internal/reference/transpose.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/batch_matmul.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +const int kBatchMatmulInputLhsTensor = 0; +const int kBatchMatmulInputRhsTensor = 1; +const int kBatchMatmulOutputTensor = 0; + +TfLiteStatus ReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node, + const RuntimeShape& extended_lhs_shape, + const RuntimeShape& extended_rhs_shape, + bool adj_x, bool adj_y, int output_rank, + TfLiteTensor* output) { + int64_t orig_size = NumElements(output); + + // make sure the new output dims rank does not exceed the original rank + TF_LITE_ENSURE(context, output_rank <= NumDimensions(output)); + + // make sure output tensor dims are not in the FlatBuffer + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + // Fill in any broadcast dimensions. + for (int i = 0; i < output_rank - 2; ++i) { + const int lhs_dim = extended_lhs_shape.Dims(i); + const int rhs_dim = extended_rhs_shape.Dims(i); + int broadcast_dim = lhs_dim; + if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { + broadcast_dim = rhs_dim; + } + output->dims->data[i] = broadcast_dim; + } + // Fill in the matmul dimensions. + int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; + int rhs_cols_index = adj_y ? 
output_rank - 2 : output_rank - 1; + + output->dims->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); + output->dims->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); + output->dims->size = output_rank; + + // Check that output tensor has not been resized + // since TFLM doesn't support tensor resizing. + TF_LITE_ENSURE_EQ(context, orig_size, NumElements(output)); + + return kTfLiteOk; +} + +template <typename T> +void TransposeRowsColumnsImpl(const TfLiteEvalTensor& tensor_in, + TfLiteEvalTensor* tensor_out) { + const T* input = tflite::micro::GetTensorData<T>(&tensor_in); + T* output = tflite::micro::GetTensorData<T>(tensor_out); + RuntimeShape transposed_shape(tflite::micro::GetTensorShape(&tensor_in)); + RuntimeShape shape(transposed_shape); + TransposeParams params; + const int rank = shape.DimensionsCount(); + params.perm_count = rank; + for (int i = 0; i < rank - 2; ++i) { + params.perm[i] = i; + } + // Transpose the last two dimensions. + params.perm[rank - 2] = rank - 1; + params.perm[rank - 1] = rank - 2; + transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); + transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); + reference_ops::Transpose(params, shape, input, transposed_shape, output); +} + +TfLiteStatus TransposeRowsColumns(const TfLiteEvalTensor& tensor_in, + TfLiteEvalTensor* tensor_out) { + if (tensor_in.type == kTfLiteFloat32) { + TransposeRowsColumnsImpl<float>(tensor_in, tensor_out); + return kTfLiteOk; + } else if (tensor_in.type == kTfLiteInt8) { + TransposeRowsColumnsImpl<int8_t>(tensor_in, tensor_out); + return kTfLiteOk; + } else if (tensor_in.type == kTfLiteInt16) { + TransposeRowsColumnsImpl<int16_t>(tensor_in, tensor_out); + return kTfLiteOk; + } else { + MicroPrintf( + "BATCH_MATMUL can only transpose tensors with FLOAT32, INT8, INT16 " + "type."); + } + return kTfLiteError; +} + +RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { + RuntimeShape swapped_shape(shape); + const int32_t dims = shape.DimensionsCount(); + 
swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); + swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); + return swapped_shape; +} + +} // namespace tflite diff --git a/tensorflow/lite/micro/micro_mutable_op_resolver.h b/tensorflow/lite/micro/micro_mutable_op_resolver.h index f5f6e38e003..52ec7217c17 100644 --- a/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ b/tensorflow/lite/micro/micro_mutable_op_resolver.h @@ -1,4 +1,4 @@ -/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ limitations under the License. #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/compatibility.h" #include "tensorflow/lite/micro/kernels/add.h" +#include "tensorflow/lite/micro/kernels/batch_matmul.h" #include "tensorflow/lite/micro/kernels/conv.h" #include "tensorflow/lite/micro/kernels/depthwise_conv.h" #include "tensorflow/lite/micro/kernels/ethosu.h" @@ -33,7 +34,9 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/pooling.h" #include "tensorflow/lite/micro/kernels/reduce.h" #include "tensorflow/lite/micro/kernels/softmax.h" +#include "tensorflow/lite/micro/kernels/svdf.h" #include "tensorflow/lite/micro/kernels/transpose_conv.h" +#include "tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h" #include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_op_resolver.h" #include "tensorflow/lite/schema/schema_generated.h" @@ -145,9 +148,10 @@ class MicroMutableOpResolver : public MicroOpResolver { return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, registration, ParsePool); } - TfLiteStatus AddBatchMatMul() { - return AddBuiltin(BuiltinOperator_BATCH_MATMUL, - tflite::Register_BATCH_MATMUL(), ParseBatchMatMul); + TfLiteStatus AddBatchMatMul( + const TFLMRegistration& registration = Register_BATCH_MATMUL()) { + return AddBuiltin(BuiltinOperator_BATCH_MATMUL, registration, + ParseBatchMatMul); } TfLiteStatus AddBatchToSpaceNd() { diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile index d9f150d3f96..36a8b14383e 100644 --- a/tensorflow/lite/micro/tools/make/Makefile +++ b/tensorflow/lite/micro/tools/make/Makefile @@ -350,6 +350,7 @@ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/add_n.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/arg_min_max.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/assign_variable.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_matmul.cc \ +$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_matmul_common.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_to_space_nd.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_args.cc \ $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_to.cc \