From a8e78fd3802eb2ec8fc1f3a7b3abaf5d6c5214bb Mon Sep 17 00:00:00 2001 From: Nishak Date: Fri, 6 Dec 2024 07:23:42 -0800 Subject: [PATCH 1/2] adding bitwise, fmod, logical compare operators --- backends/cadence/aot/functions_hifi.yaml | 78 +- backends/cadence/hifi/kernels/CMakeLists.txt | 4 + backends/cadence/hifi/kernels/kernels.h | 36 + .../cadence/hifi/operators/CMakeLists.txt | 14 +- .../cadence/hifi/operators/op_bitwise_and.cpp | 186 ++ .../cadence/hifi/operators/op_bitwise_or.cpp | 186 ++ .../cadence/hifi/operators/op_bitwise_xor.cpp | 186 ++ backends/cadence/hifi/operators/op_bmm.cpp | 167 ++ backends/cadence/hifi/operators/op_eq.cpp | 188 ++ backends/cadence/hifi/operators/op_fmod.cpp | 290 +++ backends/cadence/hifi/operators/op_ge.cpp | 189 ++ backends/cadence/hifi/operators/op_gt.cpp | 189 ++ backends/cadence/hifi/operators/op_le.cpp | 188 ++ backends/cadence/hifi/operators/op_lt.cpp | 187 ++ backends/cadence/hifi/operators/op_mm.cpp | 149 ++ backends/cadence/hifi/operators/op_ne.cpp | 187 ++ .../nnlib/xa_nn_elm_fmod_broadcast_f32.c | 525 +++++ .../nnlib/xa_nn_elm_logicalxor_bool_bool.c | 52 + .../nnlib/xa_nn_greater_lesser_equal_f32.c | 2029 +++++++++++++++++ 19 files changed, 5024 insertions(+), 6 deletions(-) create mode 100644 backends/cadence/hifi/operators/op_bitwise_and.cpp create mode 100644 backends/cadence/hifi/operators/op_bitwise_or.cpp create mode 100644 backends/cadence/hifi/operators/op_bitwise_xor.cpp create mode 100644 backends/cadence/hifi/operators/op_bmm.cpp create mode 100644 backends/cadence/hifi/operators/op_eq.cpp create mode 100644 backends/cadence/hifi/operators/op_fmod.cpp create mode 100644 backends/cadence/hifi/operators/op_ge.cpp create mode 100644 backends/cadence/hifi/operators/op_gt.cpp create mode 100644 backends/cadence/hifi/operators/op_le.cpp create mode 100644 backends/cadence/hifi/operators/op_lt.cpp create mode 100644 backends/cadence/hifi/operators/op_mm.cpp create mode 100644 backends/cadence/hifi/operators/op_ne.cpp create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 5c53f7e7ae..516077b5bf 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -32,10 +32,40 @@ - arg_meta: null kernel_name: cadence::impl::HiFi::add_out +- op: bitwise_and.Scalar_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_and_Scalar_out + +- op: bitwise_and.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_and_Tensor_out + +- op: bitwise_or.Scalar_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_or_Scalar_out + +- op: bitwise_or.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_or_Tensor_out + +- op: bitwise_xor.Scalar_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_xor_Scalar_out + +- op: bitwise_xor.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::bitwise_xor_Tensor_out + - op: bmm.out kernels: - arg_meta: null - kernel_name: torch::executor::bmm_out + kernel_name: cadence::impl::HiFi::bmm_out - op: cat.out kernels: @@ -67,26 +97,56 @@ - arg_meta: null kernel_name: torch::executor::embedding_out +- op: 
fmod.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::fmod_Tensor_out + +- op: fmod.Scalar_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::fmod_Scalar_out + - op: full.out kernels: - arg_meta: null kernel_name: cadence::impl::HiFi::full_out - -- op: gt.Scalar_out + +- op: ge.Tensor_out kernels: - arg_meta: null - kernel_name: torch::executor::gt_scalar_out + kernel_name: cadence::impl::HiFi::ge_tensor_out - op: gelu.out kernels: - arg_meta: null kernel_name: torch::executor::gelu_out +- op: gt.Scalar_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::gt_scalar_out + +- op: eq.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::eq_tensor_out + - op: hardtanh.out kernels: - arg_meta: null kernel_name: torch::executor::hardtanh_out +- op: le.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::le_tensor_out + +- op: lt.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::lt_tensor_out + - op: max_pool2d_with_indices.out kernels: - arg_meta: null @@ -107,11 +167,21 @@ - arg_meta: null kernel_name: cadence::impl::HiFi::minimum_out +- op: mm.out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::mm_out + - op: mul.out kernels: - arg_meta: null kernel_name: cadence::impl::HiFi::mul_out +- op: ne.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::ne_tensor_out + - op: permute_copy.out kernels: - arg_meta: null diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 9bbd386c75..9a2e6e6af1 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -16,6 +16,10 @@ add_library( ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_clamp_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_pow_f32.c diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index c5795a617a..47e768a29e 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -89,6 +89,42 @@ extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( const WORD32* const p_inp2_shape, WORD32 mode); +extern "C" WORD32 xa_nn_elm_greater_lesser_equal_f32xf32_f32( + WORD8* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp1, + const FLOAT32* __restrict__ p_inp2, + WORD32 num_elm, + WORD32 kernel_type); + +extern "C" WORD32 xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + WORD8* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape, + WORD32 
kernel_type); + +extern "C" WORD32 xa_nn_elm_fmod_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp1, + const FLOAT32* __restrict__ p_inp2, + WORD32 num_elm); + +extern "C" WORD32 xa_nn_elm_fmod_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_logicalxor_boolxbool_bool( + WORD8* __restrict__ p_out, + const WORD8* __restrict__ p_inp1, + const WORD8* __restrict__ p_inp2, + WORD32 num_elm); + extern "C" WORD32 xa_nn_elm_maximum_f32xf32_f32( FLOAT32* __restrict__ p_out, const FLOAT32* __restrict__ p_inp1, diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index a5a8263bd7..e52eb602c8 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -22,14 +22,26 @@ endif() set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_atan2.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bitwise_and.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bitwise_or.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bitwise_xor.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bmm.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_cat.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_clamp.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_eq.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_fmod.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_ge.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_gt.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_full.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_le.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_lt.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_maximum.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_minimum.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mm.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_ne.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_permute_copy.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_pow.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_remainder.cpp" @@ -39,10 +51,8 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_where.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_gt.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_gelu.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_hardtanh.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_max_pool2d_with_indices.cpp" diff --git a/backends/cadence/hifi/operators/op_bitwise_and.cpp b/backends/cadence/hifi/operators/op_bitwise_and.cpp new file mode 100644 index 0000000000..0357aab675 --- /dev/null 
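For reference, a minimal sketch of how the NNLib entry points declared above can be driven on small flat buffers. This is an illustration only, not NNLib documentation: the include mirrors the quoted "kernels.h" used by op_fmod.cpp later in this patch, the kernel_type value 0 is the selector the ge operator below passes for a greater-or-equal compare, and a zero return code is treated as success, matching the ret_val == 0 checks in the comparison operators.

// Illustrative smoke test for the prototypes added to kernels.h above.
#include "kernels.h" // as included by op_fmod.cpp in this patch

static void smoke_test_new_kernels() {
  const FLOAT32 a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  const FLOAT32 b[4] = {2.0f, 2.0f, 2.0f, 2.0f};
  WORD8 cmp_out[4]; // one byte per element for the comparison result
  FLOAT32 fmod_out[4];

  // Element-wise a >= b; kernel_type 0 is what op_ge.cpp passes below.
  WORD32 ret = xa_nn_elm_greater_lesser_equal_f32xf32_f32(
      cmp_out, a, b, /*num_elm=*/4, /*kernel_type=*/0);

  // Element-wise fmod(a, b) on the same buffers.
  if (ret == 0) {
    ret = xa_nn_elm_fmod_f32xf32_f32(fmod_out, a, b, /*num_elm=*/4);
  }
  (void)ret;
}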
+++ b/backends/cadence/hifi/operators/op_bitwise_and.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +// patternlint-disable-next-line executorch-cpp-nostdinc +#include +#include +#include +#include +#include +#include +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::canCast; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::promoteTypes; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& bitwise_and_Tensor_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + static constexpr const char op_name[] = "bitwise_and.Tensor_out"; + constexpr int kNnlibMaxDim = 16; + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted && b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Bool) + optimized = false; + + if (max_dim > kNnlibMaxDim) + optimized = false; + + WORD32 num_elm = out.numel(); + + if (optimized) { + if (broadcast) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + WORD8* __restrict__ ptr2 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pin2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + xa_nn_broadcast_8_8(ptr2, p_out_shape, pin2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr2; + + xa_nn_elm_logicaland_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (a_is_broadcasted && !b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicaland_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (!a_is_broadcasted && b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pinp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pinp2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicaland_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else { + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + xa_nn_elm_logicaland_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } + return out; + } + + return torch::executor::native::internal::bitwise_tensor_out(ctx, a, b, out); +} + +Tensor& bitwise_and_Scalar_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + // 
@lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "bitwise_and.Scalar_out"; + return torch::executor::native::internal::bitwise_scalar_out( + ctx, a, b, out); +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_bitwise_or.cpp b/backends/cadence/hifi/operators/op_bitwise_or.cpp new file mode 100644 index 0000000000..93fff0406b --- /dev/null +++ b/backends/cadence/hifi/operators/op_bitwise_or.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +// patternlint-disable-next-line executorch-cpp-nostdinc +#include +#include +#include +#include +#include +#include +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::canCast; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::promoteTypes; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& bitwise_or_Tensor_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + static constexpr const char op_name[] = "bitwise_or.Tensor_out"; + constexpr int kNnlibMaxDim = 16; + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted && b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Bool) + optimized = false; + + if (max_dim > kNnlibMaxDim) + optimized = false; + + WORD32 num_elm = out.numel(); + + if (optimized) { + if (broadcast) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + WORD8* __restrict__ ptr2 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pin2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + xa_nn_broadcast_8_8(ptr2, p_out_shape, pin2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr2; + + xa_nn_elm_logicalor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (a_is_broadcasted && !b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicalor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (!a_is_broadcasted && b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pinp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pinp2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicalor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else { + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + xa_nn_elm_logicalor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } + return out; + } + + return torch::executor::native::internal::bitwise_tensor_out(ctx, a, b, out); +} + +Tensor& bitwise_or_Scalar_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + // @lint-ignore 
CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "bitwise_or.Scalar_out"; + return torch::executor::native::internal::bitwise_scalar_out( + ctx, a, b, out); +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_bitwise_xor.cpp b/backends/cadence/hifi/operators/op_bitwise_xor.cpp new file mode 100644 index 0000000000..2b1a49b140 --- /dev/null +++ b/backends/cadence/hifi/operators/op_bitwise_xor.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +// patternlint-disable-next-line executorch-cpp-nostdinc +#include +#include +#include +#include +#include +#include +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::canCast; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::promoteTypes; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& bitwise_xor_Tensor_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + static constexpr const char op_name[] = "bitwise_xor.Tensor_out"; + constexpr int kNnlibMaxDim = 16; + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted && b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Bool) + optimized = false; + + if (max_dim > kNnlibMaxDim) + optimized = false; + + WORD32 num_elm = out.numel(); + + if (optimized) { + if (broadcast) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + WORD8* __restrict__ ptr2 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pin2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + xa_nn_broadcast_8_8(ptr2, p_out_shape, pin2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr2; + + xa_nn_elm_logicalxor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (a_is_broadcasted && !b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ pin1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + const WORD8* __restrict__ p_inp1 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicalxor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else if (!a_is_broadcasted && b_is_broadcasted) { + WORD8* __restrict__ ptr1 = (WORD8* __restrict__)kernels::allocate_temp_memory(ctx, num_elm); + + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ pinp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + xa_nn_broadcast_8_8(ptr1, p_out_shape, pinp2, p_inp2_shape, out_dim); + + const WORD8* __restrict__ p_inp2 = (const WORD8* __restrict__)ptr1; + + xa_nn_elm_logicalxor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } else { + const WORD8* __restrict__ p_inp1 = + (const WORD8* __restrict__)a.const_data_ptr(); + const WORD8* __restrict__ p_inp2 = + (const WORD8* __restrict__)b.const_data_ptr(); + + WORD8* __restrict__ p_out = + (WORD8* __restrict__)out.mutable_data_ptr(); + + xa_nn_elm_logicalxor_boolxbool_bool(p_out, p_inp1, p_inp2, num_elm); + } + return out; + } + + return torch::executor::native::internal::bitwise_tensor_out(ctx, a, b, out); +} + +Tensor& bitwise_xor_Scalar_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + // 
@lint-ignore CLANGTIDY facebook-hte-CArray + static constexpr const char op_name[] = "bitwise_xor.Scalar_out"; + return torch::executor::native::internal::bitwise_scalar_out( + ctx, a, b, out); +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_bmm.cpp b/backends/cadence/hifi/operators/op_bmm.cpp new file mode 100644 index 0000000000..7d4f3e3450 --- /dev/null +++ b/backends/cadence/hifi/operators/op_bmm.cpp @@ -0,0 +1,167 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include + +using Tensor = exec_aten::Tensor; +using exec_aten::ScalarType; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::kTensorDimensionLimit; +using executorch::runtime::resize_tensor; +using executorch::runtime::tensors_have_same_dim_order; +using executorch::runtime::tensor_is_default_dim_order; +using torch::executor::check_bmm_args; +using torch::executor::Error; +using torch::executor::get_bmm_out_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& bmm_out( + KernelRuntimeContext& ctx, + const Tensor& in, + const Tensor& mat2, + Tensor& out) { + ET_KERNEL_CHECK(ctx, check_bmm_args(in, mat2, out), InvalidArgument, out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(in, mat2, out), InvalidArgument, out); + + ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out); + + size_t output_ndim = 0; + exec_aten::SizesType output_sizes[kTensorDimensionLimit]; + get_bmm_out_target_size(in, mat2, output_sizes, &output_ndim); + ET_KERNEL_CHECK( + ctx, + resize_tensor(out, {output_sizes, output_ndim}) == Error::Ok, + InvalidArgument, + out); + + constexpr auto name = "bmm.out"; + constexpr int kNnlibMaxDim = 3; + + bool optimized = true; + + if (out.scalar_type() != ScalarType::Float) + optimized = false; + + if (in.dim() > kNnlibMaxDim) + optimized = false; + + if (optimized) { + const float* in_data = in.const_data_ptr(); + const float* mat2_data = mat2.const_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + int64_t batch_size = in.size(0); + int64_t m = in.size(1); + int64_t n = in.size(2); + int64_t p = mat2.size(2); + + WORD32 rows = m; + WORD32 cols1 = n; + WORD32 row_stride1 = n; + WORD32 vec_count = p; + WORD32 vec_offset = n; + WORD32 out_offset = 1; + WORD32 out_stride = p; + + WORD32* __restrict__ tmp = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, (batch_size * m * p) * sizeof(float)); + tmp[batch_size * m * p] = {0}; + + WORD32* __restrict__ p_o = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, (batch_size * m * p) * sizeof(WORD32)); + + for (int i = 0; i < batch_size; ++i) { + const FLOAT32* __restrict__ p_mat1 = in_data + i * m * n; + const FLOAT32* __restrict__ p_vec1 = mat2_data + i * n * p; + FLOAT32* __restrict__ p_out = out_data + i * m * p; + const FLOAT32* __restrict__ p_bias = (const FLOAT32* __restrict__)tmp; + + WORD32* p_inp = (WORD32*)p_vec1; + + WORD32 p_inp_shape[kNnlibMaxDim]; + p_inp_shape[0] = n; + p_inp_shape[1] = p; + p_inp_shape[2] = 1; + + WORD32 p_out_shape[kNnlibMaxDim]; + p_out_shape[0] = p; + p_out_shape[1] = n; + p_out_shape[2] = 1; + + WORD32 p_permute_vec[kNnlibMaxDim] = {1, 0, 2}; + + WORD32 num_out_dims = 
kNnlibMaxDim; + WORD32 num_inp_dims = kNnlibMaxDim; + + xa_nn_transpose_32_32( + p_o, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + const FLOAT32* __restrict__ p_vec = (const FLOAT32* __restrict__)p_o; + + xa_nn_matmul_f32xf32_f32( + p_out, + p_mat1, + p_vec, + p_bias, + rows, + cols1, + row_stride1, + vec_count, + vec_offset, + out_offset, + out_stride); + } + + free(tmp); + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Half, in.scalar_type(), ctx, name, CTYPE, [&]() { + const CTYPE* in_data = in.const_data_ptr(); + const CTYPE* mat2_data = mat2.const_data_ptr(); + CTYPE* out_data = out.mutable_data_ptr(); + + int64_t batch_size = in.size(0); + int64_t m = in.size(1); + int64_t n = in.size(2); + int64_t p = mat2.size(2); + + for (int i = 0; i < batch_size; ++i) { + const CTYPE* in_data_offset = in_data + i * m * n; + const CTYPE* mat2_data_offset = mat2_data + i * n * p; + CTYPE* out_data_offset = out_data + i * m * p; + + torch::executor::vec_matmul( + out_data_offset, in_data_offset, mat2_data_offset, m, n, p); + } + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence diff --git a/backends/cadence/hifi/operators/op_eq.cpp b/backends/cadence/hifi/operators/op_eq.cpp new file mode 100644 index 0000000000..30bf2f3071 --- /dev/null +++ b/backends/cadence/hifi/operators/op_eq.cpp @@ -0,0 +1,188 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& eq_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "eq.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 4); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 4); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted == b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& eq_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "eq.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted == b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git 
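The eq implementation above, and the ge/gt/le and fmod implementations that follow, all repeat the same shape-preparation idiom before calling the *_broadcast_4D_* kernels: every tensor shape is right-aligned into a fixed four-element array (kNnlibMaxDim) padded with leading 1s. Below is a hedged sketch of that idiom as a standalone helper; the helper name is hypothetical, the patch keeps this logic inlined in each operator, and the only assumption beyond this patch is the exec_aten.h runtime header for the Tensor alias.

// Hypothetical helper consolidating the shape padding done inline by the
// comparison and fmod operators in this patch before the 4D broadcast calls.
#include <executorch/runtime/core/exec_aten/exec_aten.h>

constexpr int kNnlibMaxDim = 4; // same limit the operators use

inline void pad_shape_to_4d(
    const exec_aten::Tensor& t,
    int out_shape[kNnlibMaxDim]) {
  // Initialize to all-ones, then right-align the real sizes.
  for (int i = 0; i < kNnlibMaxDim; i++)
    out_shape[i] = 1;
  const int off = kNnlibMaxDim - t.dim();
  for (int i = 0; i < t.dim(); i++)
    out_shape[i + off] = t.size(i);
}

With the shapes padded this way, the broadcast branch of eq reduces to a single call to xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32 with kernel_type 4, as shown above.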
a/backends/cadence/hifi/operators/op_fmod.cpp b/backends/cadence/hifi/operators/op_fmod.cpp new file mode 100644 index 0000000000..302286651f --- /dev/null +++ b/backends/cadence/hifi/operators/op_fmod.cpp @@ -0,0 +1,290 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include + +#include +#include +#include +#include + +#include "kernels.h" + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::can_cast; +using executorch::runtime::canCast; +using executorch::runtime::is_integral_type; +using executorch::runtime::isIntegralType; +using executorch::runtime::promoteTypes; +using exec_aten::Tensor; +using torch::executor::apply_unary_map_fn; +using torch::executor::apply_binary_elementwise_fn; +using torch::executor::Error; +using torch::executor::native::utils::extract_scalar; +using torch::executor::native::utils::get_scalar_dtype; +using torch::executor::native::utils::promote_type_with_scalar; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +namespace { +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct FmodInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct FmodInner { + static void + run(const Tensor& a, const Tensor& b, Tensor& out, bool& div_by_zero_error) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [&div_by_zero_error](const CTYPE_A val_a, const CTYPE_B val_b) { + if (is_integral_type::value) { + if (val_b == 0) { + div_by_zero_error = true; + return static_cast(0); + } + } + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = std::fmod(a_casted, b_casted); + + return static_cast(value); + }, + a, + b, + out); + } +}; + +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, Tensor&, bool&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct FmodInner + : public ReportCanCastBug {}; + +} // namespace + +Tensor& fmod_Tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + static constexpr const char op_name[] = "fmod.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if ((a.scalar_type() == ScalarType::Float)||(b.scalar_type() == ScalarType::Float)) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if ((broadcast == true) && (max_dim > kNnlibMaxDim)) + optimized = false; + + if(optimized){ + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + if(broadcast){ + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for(int i = 0; i < kNnlibMaxDim; i++){ + p_inp1_shape[i] = 1; + p_inp2_shape[i] = 1; + p_out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for(int i = 0; i < out.dim(); i++) + p_out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + p_inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + p_inp2_shape[i+off_b] = b.size(i); + + WORD32 val = xa_nn_elm_fmod_broadcast_4D_f32xf32_f32(p_out, + p_out_shape, + p_inp1, + p_inp1_shape, + p_inp2, + p_inp2_shape); + } + else + { + WORD32 num_elm = out.numel(); + + WORD32 val = xa_nn_elm_fmod_f32xf32_f32(p_out, + p_inp1, + p_inp2, + num_elm); + } + + return out; + } + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + auto div_by_zero_error = false; + + ET_SWITCH_REAL_TYPES_AND( + Bool, a_type, ctx, op_name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND( + Bool, b_type, ctx, op_name, CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REAL_TYPES( + out_type, ctx, op_name, CTYPE_OUT, [&]() { + FmodInner< + !std::is_same::value && + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out, div_by_zero_error); + }); + }); + }); + + ET_KERNEL_CHECK_MSG( + ctx, + !div_by_zero_error, + InvalidArgument, + out, + "Fmod operation encountered integer division by zero"); + + return out; +} + +Tensor& fmod_Scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = get_scalar_dtype(b); + ScalarType common_type = promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + // Check for integer division by zero + if (isIntegralType(common_type, /*includeBool=*/true)) { + auto is_zero = false; + ET_SWITCH_REAL_TYPES_AND( + Bool, b_type, ctx, "fmod.Scalar_out", CTYPE_B, [&]() { + CTYPE_B val_b = 0; + extract_scalar(b, &val_b); + is_zero = (val_b == 0); + }); + + ET_KERNEL_CHECK_MSG( + ctx, + !is_zero, + InvalidArgument, + out, + "Fmod operation encountered integer division by zero"); + } + + ET_SWITCH_REAL_TYPES_AND( + Bool, a_type, ctx, "fmod.Scalar_out", CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES( + b_type, ctx, "fmod.Scalar_out", CTYPE_B, [&]() { + CTYPE_B val_b = 0; + 
extract_scalar(b, &val_b); + ET_SWITCH_REAL_TYPES( + common_type, ctx, "fmod.Scalar_out", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES( + out_type, ctx, "fmod.Scalar_out", CTYPE_OUT, [&]() { + apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = + static_cast(val_a); + CTYPE_IN b_casted = + static_cast(val_b); + CTYPE_IN value = std::fmod(a_casted, b_casted); + + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_ge.cpp b/backends/cadence/hifi/operators/op_ge.cpp new file mode 100644 index 0000000000..126c3269cf --- /dev/null +++ b/backends/cadence/hifi/operators/op_ge.cpp @@ -0,0 +1,189 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& ge_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "ge.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if (broadcast && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 0); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 0); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted >= b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& ge_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "ge.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType common_type = + torch::executor::native::utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted >= b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git 
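The comparison operators in this patch all route to the same pair of NNLib entry points and differ only in the kernel_type selector they pass: eq passes 4 and ge passes 0 above, while the gt and le operators that follow pass 1 and 2. A short sketch of named constants for those selectors follows; the names are hypothetical, the values are taken from the calls in this patch, and the selectors used by lt and ne are not visible in this excerpt.

// Hypothetical named constants for the kernel_type argument of
// xa_nn_elm_greater_lesser_equal_f32xf32_f32 and its broadcast variant;
// values mirror the operator calls in this patch.
enum XaCompareKernelType : int {
  kXaCompareGe = 0, // op_ge.cpp
  kXaCompareGt = 1, // op_gt.cpp
  kXaCompareLe = 2, // op_le.cpp
  kXaCompareEq = 4, // op_eq.cpp
};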
a/backends/cadence/hifi/operators/op_gt.cpp b/backends/cadence/hifi/operators/op_gt.cpp new file mode 100644 index 0000000000..cab3c5eac7 --- /dev/null +++ b/backends/cadence/hifi/operators/op_gt.cpp @@ -0,0 +1,189 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& gt_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "gt.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = 1; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = 0; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = 0; + + if ( broadcast && (max_dim > kNnlibMaxDim)) + optimized = 0; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 1); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 1); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted > b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& gt_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "gt.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType common_type = + torch::executor::native::utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted > b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git 
a/backends/cadence/hifi/operators/op_le.cpp b/backends/cadence/hifi/operators/op_le.cpp new file mode 100644 index 0000000000..435788353c --- /dev/null +++ b/backends/cadence/hifi/operators/op_le.cpp @@ -0,0 +1,188 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& le_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "le.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if (broadcast && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 2); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 2); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted <= b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& le_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "le.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType common_type = + torch::executor::native::utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted <= b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git 
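Note (not part of this patch): the comparison operators all funnel into the same two NNLib entry points and select the comparison through the integer kernel_type argument. The values passed by the operator files, together with the matching branches in xa_nn_greater_lesser_equal_f32.c, imply the mapping below; the enum is purely illustrative, since the NNLib functions take a plain WORD32.

// Illustrative names only; the patch passes the raw integers.
enum CompareKernelType {
  kCompareGE = 0, // a >= b : xtfloatx2_LE_xtfloatx2(x2, x1)
  kCompareGT = 1, // a >  b : xtfloatx2_LT_xtfloatx2(x2, x1)
  kCompareLE = 2, // a <= b : xtfloatx2_LE_xtfloatx2(x1, x2)
  kCompareLT = 3, // a <  b : xtfloatx2_LT_xtfloatx2(x1, x2)
  kCompareEQ = 4, // a == b : xtfloatx2_EQ_xtfloatx2(x1, x2)
  kCompareNE = 5, // a != b : negated EQ result
};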
a/backends/cadence/hifi/operators/op_lt.cpp b/backends/cadence/hifi/operators/op_lt.cpp new file mode 100644 index 0000000000..f99fdeb85f --- /dev/null +++ b/backends/cadence/hifi/operators/op_lt.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& lt_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "lt.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if (broadcast && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 3); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 3); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted < b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& lt_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "lt.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType common_type = + torch::executor::native::utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted < b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git 
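Note (not part of this patch): when the fast path is skipped (non-float output, a zero-dim input, or a broadcast rank above kNnlibMaxDim), these operators fall back to torch::executor::apply_binary_elementwise_fn with a comparison lambda. A simplified sketch of that pattern for same-shaped, contiguous inputs; the real helper also handles broadcasting, and the names here are illustrative only.

#include <cstddef>

// Apply `op` element-wise and store the 0/1 result in the output element type.
template <typename CTYPE_A, typename CTYPE_B, typename CTYPE_OUT, typename Op>
void elementwise_compare(
    CTYPE_OUT* out,
    const CTYPE_A* a,
    const CTYPE_B* b,
    size_t numel,
    Op op) {
  for (size_t i = 0; i < numel; i++) {
    out[i] = static_cast<CTYPE_OUT>(op(a[i], b[i]));
  }
}

// Usage sketch for lt: out[i] = (a[i] < b[i])
// elementwise_compare(out_ptr, a_ptr, b_ptr, numel,
//                     [](float x, float y) { return x < y; });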
a/backends/cadence/hifi/operators/op_mm.cpp b/backends/cadence/hifi/operators/op_mm.cpp new file mode 100644 index 0000000000..ceedc97eeb --- /dev/null +++ b/backends/cadence/hifi/operators/op_mm.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include + +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::kTensorDimensionLimit; +using executorch::runtime::resize_tensor; +using executorch::runtime::tensor_is_default_dim_order; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::check_mm_args; +using torch::executor::Error; +using torch::executor::get_mm_out_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& mm_out( + KernelRuntimeContext& ctx, + const Tensor& in, + const Tensor& mat2, + Tensor& out) { + ET_KERNEL_CHECK(ctx, check_mm_args(in, mat2, out), InvalidArgument, out); + + size_t output_ndim = 0; + exec_aten::SizesType output_sizes[kTensorDimensionLimit]; + get_mm_out_target_size(in, mat2, output_sizes, &output_ndim); + ET_KERNEL_CHECK( + ctx, + resize_tensor(out, {output_sizes, output_ndim}) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(in, mat2, out), InvalidArgument, out); + + ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out); + + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "mm.out"; + + bool optimized = true; + + if (out_type != ScalarType::Float) + optimized = false; + + if (optimized) { + const float* in_data = in.const_data_ptr(); + const float* mat2_data = mat2.const_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + int64_t m = in.size(0); + int64_t n = in.size(1); + + int64_t p = mat2.size(1); + + WORD32 rows = m; + WORD32 cols1 = n; + WORD32 row_stride1 = n; + WORD32 vec_count = p; + WORD32 vec_offset = n; + WORD32 out_offset = 1; + WORD32 out_stride = p; + + WORD32* __restrict__ p_o = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, (n * p) * sizeof(WORD32)); + + WORD32 p_inp_shape[2]; + p_inp_shape[0] = n; + p_inp_shape[1] = p; + + WORD32 p_out_shape[2]; + p_out_shape[0] = p; + p_out_shape[1] = n; + + WORD32 p_permute_vec[2] = {1, 0}; + + WORD32 num_out_dims = 2; + WORD32 num_inp_dims = 2; + + const FLOAT32* __restrict__ p_mat1 = in_data; + const FLOAT32* __restrict__ p_vec1 = mat2_data; + FLOAT32* __restrict__ p_out = out_data; + + WORD32* p_inp = (WORD32*)p_vec1; + + WORD32 t = xa_nn_transpose_32_32( + p_o, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + const FLOAT32* __restrict__ p_vec = (const FLOAT32* __restrict__)p_o; + + WORD32 val = xa_nn_matmul_f32xf32_f32( + p_out, + p_mat1, + p_vec, + NULL, + rows, + cols1, + row_stride1, + vec_count, + vec_offset, + out_offset, + out_stride); + + return out; + } + + ET_SWITCH_REAL_TYPES_AND2( + Half, BFloat16, in.scalar_type(), ctx, name, CTYPE, [&]() { + size_t m = in.size(0); + size_t n = in.size(1); + size_t p = mat2.size(1); + + torch::executor::vec_matmul( + out.mutable_data_ptr(), + in.const_data_ptr(), + mat2.const_data_ptr(), + m, + n, + p); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No 
newline at end of file diff --git a/backends/cadence/hifi/operators/op_ne.cpp b/backends/cadence/hifi/operators/op_ne.cpp new file mode 100644 index 0000000000..0abed53341 --- /dev/null +++ b/backends/cadence/hifi/operators/op_ne.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::promoteTypes; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& ne_tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "ne.Tensor_out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + /*find broadcast*/ + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if ((a_dim == 0) || (b_dim == 0)) + optimized = false; + + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + int8_t* __restrict__ p_out = + (int8_t* __restrict__)out.mutable_data_ptr(); + const float* __restrict__ p_inp1 = + (const float* __restrict__)a.const_data_ptr(); + const float* __restrict__ p_inp2 = + (const float* __restrict__)b.const_data_ptr(); + + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32( + p_out, out_shape, p_inp1, inp1_shape, p_inp2, inp2_shape, 5); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } else { + int num_elm = out.numel(); + + WORD32 ret_val = xa_nn_elm_greater_lesser_equal_f32xf32_f32( + p_out, p_inp1, p_inp2, num_elm, 5); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + + return out; + } + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + 
ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted != b_casted; + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +Tensor& ne_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "ne.Scalar_out"; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = torch::executor::native::utils::get_scalar_dtype(b); + ScalarType out_type = out.scalar_type(); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = + typename torch::executor::promote_types::type; + ET_DCHECK( + CppTypeToScalarType::value == promoteTypes(a_type, b_type)); + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&]() { + CTYPE_B val_b = 0; + torch::executor::native::utils::extract_scalar(b, &val_b); + torch::executor::apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + bool value = a_casted != b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c new file mode 100644 index 0000000000..139a97ec3f --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c @@ -0,0 +1,525 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_fmod_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_fmod_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + y = 
XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_DIV_S(a1, a2); + a = FITRUNC_S(a); + a = XT_MUL_S(a, a2); + a = XT_SUB_S(a1, a); + XT_SSI(a, (xtfloat *)out, 0); + } + + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_fmod_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + y = FITRUNC_SX2(y); + y = XT_MUL_SX2(y, x1); + y = XT_SUB_SX2(x2, y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + y = FITRUNC_SX2(y); + y = XT_MUL_SX2(y, x1); + y = XT_SUB_SX2(x2, y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + c0 = FITRUNC_S(c0); + c0 = XT_MUL_S(c0, a0); + c0 = XT_SUB_S(b0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + y = XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + y = XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + c0 = FITRUNC_S(c0); + c0 = XT_MUL_S(c0, b0); + c0 = XT_SUB_S(a0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void 
internal_elm_fmod_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_fmod_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_fmod_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_fmod_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; 
+ int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_fmod_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif + diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c new file mode 100644 index 0000000000..752a25b682 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c @@ -0,0 +1,52 @@ +#include "xa_nnlib_common.h" + +WORD32 xa_nn_elm_logicalxor_boolxbool_bool(WORD8 * __restrict__ p_out, + const WORD8 * __restrict__ p_inp1, + const WORD8 * __restrict__ p_inp2, + WORD32 num_elm) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(WORD8), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(WORD8), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(WORD8), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + ae_int24x2 *pin1 = (ae_int24x2 *)p_inp1; + ae_int24x2 *pin2 = (ae_int24x2 *)p_inp2; + ae_int24x2 *pout = (ae_int24x2 *)p_out; + int i; + int N = num_elm; + /* Following line divides N by 6. Much faster than compiler implementation. Works for N<32768. 
*/
+  /* unsigned int Nby6 = (N*10923)>>16;*/
+  /* Following works for all int32 N */
+  /* Takes the high 32 bits of N * 0x2AAAAAAB (~2^32/6), i.e. N / 6 */
+  int Nby6 = AE_MOVAD32_H(AE_MOVINT32X2_FROMINT64(AE_MUL32_LL(N, 0x2AAAAAAB)));
+  int remainder_start = 6*Nby6;
+
+  ae_valign align_src_in1, align_src_in2, align_dst;
+  align_src_in1 = AE_LA64_PP(pin1);
+  align_src_in2 = AE_LA64_PP(pin2);
+  align_dst = AE_ZALIGN64();
+
+/* Loop is unrolled by 6, to use LA24X2/SA24X2 */
+  for(i=0; i < Nby6; i++){
+    ae_int24x2 vi1, vi2, vo;
+    AE_LA24X2_IP(vi1, align_src_in1, pin1);
+    AE_LA24X2_IP(vi2, align_src_in2, pin2);
+    vo = AE_XOR24(vi1, vi2);
+    AE_SA24X2_IP(vo, align_dst, pout);
+  }
+  AE_SA64POS_FP(align_dst, pout);
+
+  /* Remainder loop: logical XOR of the remaining bool elements */
+  #pragma no_unroll
+  for(i=remainder_start; i < N; i++){
+    p_out[i] = p_inp1[i] ^ p_inp2[i];
+  }
+
+  return 0;
+}
\ No newline at end of file
diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c
new file mode 100644
index 0000000000..2372fcadcd
--- /dev/null
+++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c
@@ -0,0 +1,2029 @@
+#include "xa_type_def.h"
+#include "xa_nnlib_common_fpu.h"
+#include "xa_nn_common.h"
+#include "xa_nnlib_err_chk.h"
+//#include "xa_nn_basic_state.h"
+#include "xa_nnlib_kernels_api.h"
+
+
+#if !HAVE_VFPU
+DISCARD_FUN_FOR_NONVOID_RETURN(
+  WORD32, xa_nn_elm_greater_lesser_equal_f32xf32_f32,
+  (
+    WORD8 *y,
+    const FLOAT32 *x1,
+    const FLOAT32 *x2,
+    WORD32 N,
+    WORD32 kernel_type
+  )
+)
+#else
+WORD32 xa_nn_elm_greater_lesser_equal_f32xf32_f32(WORD8 * __restrict__ p_out,
+                               const FLOAT32 * __restrict__ p_inp1,
+                               const FLOAT32 * __restrict__ p_inp2,
+                               WORD32 num_elm,
+                               WORD32 kernel_type)
+{
+  /* NULL pointer checks */
+  XA_NNLIB_ARG_CHK_PTR(p_out, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp1, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp2, -1);
+  /* Pointer alignment checks */
+  XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(UWORD8), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1);
+  /* Basic Parameter checks */
+  XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1);
+
+  int i;
+  xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1;
+  xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2;
+  //xtfloatx2 *out = (xtfloatx2 *)p_out;
+  UWORD8 *out = p_out;
+  xtfloatx2 x1, x2, y;
+  xtbool check;
+
+  xtfloatx2 float_0 = XT_MOV_SX2(AE_ZERO32());
+
+  if(kernel_type == 0)
+  {
+    if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0))
+    {
+      for(i=0;i < num_elm>>1;i++)
+      {
+        XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32));
+        XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32));
+
+        //y = XT_SUB_SX2(x2, x1);
+        xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1);
+
+        uint8_t val = AE_MOVAB2(check);
+
+        uint8_t store1 = (val >> 1) & 0x1;
+        *out++ = store1;
+
+        uint8_t store0 = val & 0x1;
+        *out++ = store0;
+      }
+    }
+    else
+    {
+      ae_valign inp1_a, inp2_a, out_a;
+
+      inp1_a = XT_LASX2PP(inp1);
+      inp2_a = XT_LASX2PP(inp2);
+      /* Each iteration of loop is independent so safe to use concurrent pragma */
+#pragma concurrent
+      for(i=0;i < num_elm>>1;i++)
+      {
+        XT_LASX2IP(x1, inp1_a, inp1);
+        XT_LASX2IP(x2, inp2_a, inp2);
+
+        //y = XT_SUB_SX2(x2, x1);
+        xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1);
+
+        uint8_t val = AE_MOVAB2(check);
+
+        uint8_t store1 = (val >> 1) & 0x1;
+        *out++ = store1;
+
+        uint8_t store0 = val & 0x1;
+        *out++ = store0;
+      }
+    }
+    // Remainder Loop
+    if (num_elm & 1)
+    {
+      xtfloat a1, a2, a;
+      XT_LSIP(a1, (xtfloat *)inp1, 0);
+      XT_LSIP(a2, (xtfloat *)inp2, 0);
+
+      a = 
XT_SUB_S(a2, a1); + + check = 0; + if(a <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + else if(kernel_type == 1) + { + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + + a = XT_SUB_S(a2, a1); + + check = 0; + if(a < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + else if(kernel_type == 2) + { + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + + a = XT_SUB_S(a1, a2); + + check = 0; + if(a <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + else if(kernel_type == 3) + { + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = 
xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + + a = XT_SUB_S(a1, a2); + + check = 0; + if(a < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + else if(kernel_type == 4) + { + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *out++ = store1; + + uint8_t store0 = val & 0x1; + *out++ = store0; + } + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + + //a = XT_SUB_S(a2, a1); + + check = 0; + if(a1 == a2) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + else if(kernel_type == 5) + { + ae_int32x2 ones = AE_MOVDA32(1); + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + AE_MOVF32X2(store, ones, check); + + *out++ = AE_MOVAD32_H(store); + *out++ = AE_MOVAD32_L(store); + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + AE_MOVF32X2(store, ones, check); + + *out++ = AE_MOVAD32_H(store); + *out++ = AE_MOVAD32_L(store); + } + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + + a = XT_SUB_S(a2, a1); + + check = 0; + if(a != 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *out++ = store; + } + } + + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_greater_lesser_equal_broadcast_2D_f32xf32_f32(UWORD8 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag, + WORD32 kernel_type) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + + xtbool check; + + xtfloatx2 float_0 = XT_MOV_SX2(AE_ZERO32()); + + int 
num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + UWORD8 *p_c = (UWORD8 *)&p_out[i * in_lc]; + + if(kernel_type == 0) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(c0 <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 1) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(c0 < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 2) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1); + + uint8_t val 
= AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(c0 <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 3) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(c0 < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 4) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + //c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(a0 == b0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 5) + { + ae_int32x2 ones = AE_MOVDA32(1); + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + AE_MOVF32X2(store, ones, check); + + *p_c++ = AE_MOVAD32_H(store); + *p_c++ = AE_MOVAD32_L(store); + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + 
AE_MOVF32X2(store, ones, check); + + *p_c++ = AE_MOVAD32_H(store); + *p_c++ = AE_MOVAD32_L(store); + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(c0 != 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + UWORD8 *p_c = (UWORD8 *)&p_out[i * in_lc]; + + if(kernel_type == 0) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(c0 <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if (kernel_type == 1) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x2, x1); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(c0 < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 2) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + 
XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LE_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(c0 <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 3) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x1, x2); + xtbool2 check = xtfloatx2_LT_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(a0, b0); + + check = 0; + + if(c0 < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 4) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + uint8_t val = AE_MOVAB2(check); + + uint8_t store1 = (val >> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + //c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(a0 == b0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 5) + { + ae_int32x2 ones = AE_MOVDA32(1); + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2*sizeof(FLOAT32)); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + AE_MOVF32X2(store, ones, check); + + *p_c++ = AE_MOVAD32_H(store); + *p_c++ = AE_MOVAD32_L(store); + } + } + else + { + ae_valign vinp1, vinp2; + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, 
vinp2, p_b); + + //y = XT_SUB_SX2(x2, x1); + xtbool2 check = xtfloatx2_EQ_xtfloatx2(x1, x2); + + ae_int32x2 store = AE_ZERO32(); + AE_MOVF32X2(store, ones, check); + + *p_c++ = AE_MOVAD32_H(store); + *p_c++ = AE_MOVAD32_L(store); + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_SUB_S(b0, a0); + + check = 0; + + if(c0 != 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + } + } +} + +static void internal_elm_greater_lesser_equal_broadcast_f32xf32_f32(UWORD8 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag, + WORD32 kernel_type) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + + xtbool check; + + UWORD8 * p_c = p_out; + xtfloatx2 float_0 = XT_MOV_SX2(AE_ZERO32()); + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(kernel_type == 0) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(a0_7, x2); + + check = 0; + + if(out <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 1) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(a0_7, x2); + + check = 0; + + if(out < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 2) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(x2, a0_7); + + check = 0; + + if(out <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 3) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(x2, a0_7); + + check = 0; + + if(out < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 4) + { + 
if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(a0_7, x2); + + check = 0; + + if(out == 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 5) + { + ae_int32x2 ones = AE_MOVDA32(1); + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(x2, a0_7); + + check = 0; + + if(out <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 1) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(x2, a0_7); + + check = 0; + + if(out < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 2) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(a0_7, x2); + + check = 0; + + if(out <= 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 3) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(a0_7, x2); + + check = 0; + + if(out < 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + else if(kernel_type == 4) + { + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + else + { + ae_valign inp1_a, out_a; + inp1_a = XT_LASX2PP(p_a); + + for(i=0; i> 1) & 0x1; + *p_c++ = store1; + + uint8_t store0 = val & 0x1; + *p_c++ = store0; + } + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0_7, (xtfloat *)p_a, sizeof(FLOAT32)); + out = XT_SUB_S(x2, a0_7); + + check = 0; + + if(out == 0) + check = 1; + + uint8_t store = AE_MOVAB(check); + *p_c++ = store; + } + } + } +} +#endif + +#if !HAVE_VFPU 
+DISCARD_FUN_FOR_NONVOID_RETURN(
+  WORD32, xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32,
+  (
+    WORD8 * p_out,
+    const WORD32 *const p_out_shape,
+    const FLOAT32 * p_inp1,
+    const WORD32 *const p_inp1_shape,
+    const FLOAT32 * p_inp2,
+    const WORD32 *const p_inp2_shape,
+    WORD32 kernel_type
+  )
+)
+#else
+WORD32 xa_nn_elm_greater_lesser_equal_broadcast_4D_f32xf32_f32(WORD8 * __restrict__ p_out,
+                      const WORD32 *const p_out_shape,
+                      const FLOAT32 * __restrict__ p_inp1,
+                      const WORD32 *const p_inp1_shape,
+                      const FLOAT32 * __restrict__ p_inp2,
+                      const WORD32 *const p_inp2_shape,
+                      WORD32 kernel_type)
+{
+  /* NULL pointer checks */
+  XA_NNLIB_ARG_CHK_PTR(p_out, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp1, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp2, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp1_shape, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp2_shape, -1);
+  /* Pointer alignment checks */
+  XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(UWORD8), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp1_shape, sizeof(WORD32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp2_shape, sizeof(WORD32), -1);
+
+  /* Check shapes */
+  int i;
+  xtbool sign_flag;
+  for(i = 0; i < 4; i++)
+  {
+    if((p_inp1_shape[i] != p_inp2_shape[i] && p_inp1_shape[i] != 1 && p_inp2_shape[i] != 1) ||
+       (p_out_shape[i] != (p_inp1_shape[i] > p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i])))
+    {
+      return -1;
+    }
+  }
+
+  WORD32 inp1_strides[4], inp2_strides[4];
+  inp1_strides[3] = 1;
+  inp2_strides[3] = 1;
+  for(i = 2; i >= 0; i--)
+  {
+    ae_int32x2 d_str, d_shape;
+    d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]);
+    d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]);
+    d_str = AE_MULP32X2(d_str, d_shape);
+    inp1_strides[i] = AE_MOVAD32_H(d_str);
+    inp2_strides[i] = AE_MOVAD32_L(d_str);
+  }
+
+  int need_broadcast = 0;
+  int inp1_const = 1, inp2_const = 1;
+  for(i = 0; i < 4; i++)
+  {
+    if(p_inp1_shape[i] != p_inp2_shape[i])
+    {
+      if(p_inp1_shape[i] == 1)
+        inp1_strides[i] = 0;
+      else
+        inp2_strides[i] = 0;
+
+      need_broadcast = 1;
+    }
+    if(p_inp1_shape[i] != 1)
+      inp1_const &= 0;
+    if(p_inp2_shape[i] != 1)
+      inp2_const &= 0;
+  }
+  int itr0, itr1, itr2;
+
+  UWORD8 *p_out_tmp = p_out;
+  const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1;
+  const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2;
+  if(need_broadcast == 0)
+  {
+    sign_flag = 0;
+    internal_elm_greater_lesser_equal_broadcast_2D_f32xf32_f32(
+        p_out,
+        p_inp1,
+        p_inp2,
+        1,
+        p_out_shape[0] * inp1_strides[0],
+        sign_flag,
+        kernel_type);
+  }
+  else if(inp1_strides[3] == inp2_strides[3])
+  {
+    WORD32 in_lc, out_lc;
+    sign_flag = 0;
+    in_lc = p_out_shape[2] * p_out_shape[3];
+    out_lc = 1;
+    if(inp1_strides[2] == 0)
+    {
+      const FLOAT32 *tmp;
+      tmp = p_inp1_tmp;   p_inp1_tmp = p_inp2_tmp;   p_inp2_tmp = tmp;
+      sign_flag = 1;
+      int tmp_strides[2];
+      tmp_strides[0] = inp1_strides[0];
+      tmp_strides[1] = inp1_strides[1];
+
+      inp1_strides[0] = inp2_strides[0];
+      inp1_strides[1] = inp2_strides[1];
+
+      inp2_strides[0] = tmp_strides[0];
+      inp2_strides[1] = tmp_strides[1];
+      in_lc = p_out_shape[3];
+      out_lc = p_out_shape[2];
+    }
+    else if(inp2_strides[2] == 0)
+    {
+      in_lc = p_out_shape[3];
+      out_lc = p_out_shape[2];
+    }
+
+    for(itr0 = 0; itr0 < p_out_shape[0]; itr0++)
+    {
+      const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp;
+      const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp;
+      for(itr1 = 0; itr1 < p_out_shape[1]; itr1++)
+      {
+        internal_elm_greater_lesser_equal_broadcast_2D_f32xf32_f32(
+            p_out_tmp,
+            p_inp1_tmp0,
+            p_inp2_tmp0,
+            out_lc,
+            in_lc,
+            sign_flag,
+            kernel_type);
+        p_out_tmp += in_lc * out_lc;
+        p_inp1_tmp0 += inp1_strides[1];
+        p_inp2_tmp0 += inp2_strides[1];
+      }
+      p_inp1_tmp += inp1_strides[0];
+      p_inp2_tmp += inp2_strides[0];
+    }
+  }
+  else if(inp1_const == 1 || inp2_const == 1)
+  {
+    sign_flag = 0;
+    if(inp1_strides[3] == 0)
+    {
+      sign_flag = 1;
+      const FLOAT32 *tmp;
+      tmp = p_inp1_tmp;   p_inp1_tmp = p_inp2_tmp;   p_inp2_tmp = tmp;
+    }
+    internal_elm_greater_lesser_equal_broadcast_f32xf32_f32(
+        p_out_tmp,
+        p_inp1_tmp,
+        p_inp2_tmp,
+        p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3],
+        sign_flag,
+        kernel_type);
+  }
+  else
+  {
+    sign_flag = 0;
+    if(inp1_strides[3] == 0)
+    {
+      const FLOAT32 *tmp;
+      tmp = p_inp1_tmp;   p_inp1_tmp = p_inp2_tmp;   p_inp2_tmp = tmp;
+      sign_flag = 1;
+      int tmp_strides[3];
+      tmp_strides[0] = inp1_strides[0];
+      tmp_strides[1] = inp1_strides[1];
+      tmp_strides[2] = inp1_strides[2];
+
+      inp1_strides[0] = inp2_strides[0];
+      inp1_strides[1] = inp2_strides[1];
+      inp1_strides[2] = inp2_strides[2];
+
+      inp2_strides[0] = tmp_strides[0];
+      inp2_strides[1] = tmp_strides[1];
+      inp2_strides[2] = tmp_strides[2];
+    }
+    for(itr0 = 0; itr0 < p_out_shape[0]; itr0++)
+    {
+      const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp;
+      const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp;
+      for(itr1 = 0; itr1 < p_out_shape[1]; itr1++)
+      {
+        const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0;
+        const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0;
+        for(itr2 = 0; itr2 < p_out_shape[2]; itr2++)
+        {
+          {
+            internal_elm_greater_lesser_equal_broadcast_f32xf32_f32(
+                p_out_tmp,
+                p_inp1_tmp1,
+                p_inp2_tmp1,
+                p_out_shape[3],
+                sign_flag,
+                kernel_type);
+          }
+          p_out_tmp += p_out_shape[3];
+          p_inp1_tmp1 += inp1_strides[2];
+          p_inp2_tmp1 += inp2_strides[2];
+        }
+        p_inp1_tmp0 += inp1_strides[1];
+        p_inp2_tmp0 += inp2_strides[1];
+      }
+      p_inp1_tmp += inp1_strides[0];
+      p_inp2_tmp += inp2_strides[0];
+    }
+  }
+  return 0;
+}
+#endif

From 92c0e1dc38922c069cf3153485b8db19f6527c0c Mon Sep 17 00:00:00 2001
From: Nishak
Date: Sun, 8 Dec 2024 21:42:45 -0800
Subject: [PATCH 2/2] updating xor remainder loop

---
 .../hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c
index 752a25b682..66c7aa223b 100644
--- a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c
+++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c
@@ -45,7 +45,7 @@ WORD32 xa_nn_elm_logicalxor_boolxbool_bool(WORD8 * __restrict__ p_out,
   /* Remainder loop */
 #pragma no_unroll
   for(i=remainder_start; i < N; i++){
-    p_out[i] = p_inp1[i] & p_inp2[i];
+    p_out[i] = p_inp1[i] ^ p_inp2[i];
   }
 
   return 0;
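
The comparison kernels in xa_nn_greater_lesser_equal_f32.c select their predicate through the kernel_type argument. In the hunks above, type 2 checks less-equal, type 3 less-than, type 4 equal and type 5 not-equal; types 0 and 1 are handled earlier in the file and are presumably the greater-than / greater-equal variants registered for gt and ge in functions_hifi.yaml. A minimal scalar reference model of that dispatch, ignoring the xtfloatx2 SIMD paths and alignment handling, could look like the sketch below; the function name and the type 0/1 cases are assumptions, not part of the patch.

    #include <stdint.h>

    /* Hypothetical scalar reference for the kernel_type dispatch; the real
     * kernels vectorize with xtfloatx2 loads and unpack the xtbool2 result
     * into one 0/1 byte per output element. Cases 0 and 1 are assumed. */
    static void compare_ref_f32(uint8_t *out, const float *a, const float *b,
                                int n, int kernel_type) {
      for (int i = 0; i < n; i++) {
        int r;
        switch (kernel_type) {
          case 0:  r = (a[i] >  b[i]); break;  /* assumed: greater-than  */
          case 1:  r = (a[i] >= b[i]); break;  /* assumed: greater-equal */
          case 2:  r = (a[i] <= b[i]); break;  /* matches the <= 0 scalar checks */
          case 3:  r = (a[i] <  b[i]); break;
          case 4:  r = (a[i] == b[i]); break;
          default: r = (a[i] != b[i]); break;  /* kernel_type 5 */
        }
        out[i] = (uint8_t)r;
      }
    }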
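
The stores in those SIMD loops all follow one pattern: AE_MOVAB2 turns the two-lane xtbool2 comparison result into a 2-bit mask, and the bits are written out high-bit-first so that the byte order matches the element order of the pair that was loaded. A tiny helper equivalent to that tail (hypothetical name, only for illustration):

    /* Store a 2-bit lane mask as two consecutive bool bytes, first lane first. */
    static void store_bool_pair(uint8_t *dst, unsigned mask2) {
      dst[0] = (uint8_t)((mask2 >> 1) & 0x1);
      dst[1] = (uint8_t)(mask2 & 0x1);
    }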
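
The 4D broadcast entry point accepts shapes only where each dimension pair is equal or one side is 1, requires each output dimension to be the larger of the pair, and then zeroes the stride of every broadcast dimension so the same input row is re-read. A plain-C sketch of that stride setup, without the AE_MOVDA32X2 / AE_MULP32X2 packed arithmetic used above (helper name assumed, not in the patch):

    /* Build row-major strides for two 4-D inputs and zero the stride of any
     * broadcast (size-1) dimension. Returns 1 if broadcasting is needed,
     * 0 if the shapes already match, -1 if they are incompatible. */
    static int setup_broadcast_strides(const int shape1[4], const int shape2[4],
                                       const int out_shape[4],
                                       int stride1[4], int stride2[4]) {
      stride1[3] = 1;
      stride2[3] = 1;
      for (int i = 2; i >= 0; i--) {
        stride1[i] = stride1[i + 1] * shape1[i + 1];
        stride2[i] = stride2[i + 1] * shape2[i + 1];
      }
      int need_broadcast = 0;
      for (int i = 0; i < 4; i++) {
        if (out_shape[i] != (shape1[i] > shape2[i] ? shape1[i] : shape2[i]))
          return -1;                      /* output must take the larger extent */
        if (shape1[i] != shape2[i]) {
          if (shape1[i] != 1 && shape2[i] != 1)
            return -1;                    /* not broadcast-compatible */
          if (shape1[i] == 1)
            stride1[i] = 0;               /* re-read the single row */
          else
            stride2[i] = 0;
          need_broadcast = 1;
        }
      }
      return need_broadcast;
    }

With the strides in place, the entry point walks the outer dimensions and calls the flat or 2-D internal kernel per innermost slice, swapping the inputs and setting sign_flag when the broadcast operand is on the left.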