From 6d22d50d3d967edbcf5999190dd7c88dd67f1ba2 Mon Sep 17 00:00:00 2001 From: Rushi-cad Date: Mon, 29 Jul 2024 05:33:31 -0700 Subject: [PATCH 1/4] Adding NNLIB optimized kernel for operator Div --- backends/cadence/hifi/kernels/CMakeLists.txt | 2 + backends/cadence/hifi/kernels/kernels.h | 29 + .../cadence/hifi/operators/CMakeLists.txt | 2 +- backends/cadence/hifi/operators/op_div.cpp | 362 +++++++++++++ .../nnlib/xa_nn_elm_div_broadcast_f32.c | 419 +++++++++++++++ .../nnlib/xa_nn_elm_floor_div_broadcast_f32.c | 495 ++++++++++++++++++ 6 files changed, 1308 insertions(+), 1 deletion(-) create mode 100644 backends/cadence/hifi/operators/op_div.cpp create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index af2359a1a0..7bb143f917 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -9,6 +9,8 @@ add_library( cadence_kernels kernels.cpp ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c ) target_include_directories( diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index f7b2147ca8..d10e52a77e 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -22,6 +22,35 @@ namespace kernels { void memcpy(void* dst, const void* src, size_t num_bytes); +extern "C" WORD32 xa_nn_elm_floor_div_f32xf32_f32( + FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm); + +extern "C" WORD32 xa_nn_elm_floor_div_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_div_f32xf32_f32( + FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm); + + WORD32 matmul_asym8uxasym8u_asym8u( UWORD8* __restrict__ p_out, // output uint8 matrix const UWORD8* __restrict__ p_mat1, // weight uint8 matrix diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 943982b2da..a05dd61dc3 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -35,7 +35,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_mul.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp" 
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp" diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp new file mode 100644 index 0000000000..b203bf1abb --- /dev/null +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { + +namespace { + +ScalarType get_compute_type(ScalarType a_type, ScalarType b_type) { + if (isFloatingType(a_type) && isFloatingType(b_type)) { + return promoteTypes(a_type, b_type); + } else if (isFloatingType(a_type)) { + return a_type; + } else if (isFloatingType(b_type)) { + return b_type; + } + return ScalarType::Float; +} + +} // namespace + +Tensor& +div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + + ET_KERNEL_CHECK( + ctx, + !isComplexType(a_type) && !isQIntType(a_type) && !isBitsType(a_type), + InvalidArgument, + out); + ET_KERNEL_CHECK( + ctx, + !isComplexType(b_type) && !isQIntType(b_type) && !isBitsType(b_type), + InvalidArgument, + out); + + ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + + if((a.scalar_type() == ScalarType::Float)||(b.scalar_type() == ScalarType::Float)) + { + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool any_is_broadcasted = (a_is_broadcasted || b_is_broadcasted); + + if(any_is_broadcasted) + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + WORD32 p_out_shape[4]; + WORD32 p_inp1_shape[4]; + WORD32 p_inp2_shape[4]; + + for(int i = 0; i < 4; i++) + { + p_inp1_shape[i] = 1; + p_inp2_shape[i] = 1; + p_out_shape[i] = 1; + } + + int off_o = 4 - out.dim(); + int off_a = 4 - a.dim(); + int off_b = 4 - b.dim(); + + for(int i = 0; i < out.dim(); i++) + p_out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + p_inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + p_inp2_shape[i+off_b] = b.size(i); + + WORD32 val = xa_nn_elm_div_broadcast_4D_f32xf32_f32(p_out, + p_out_shape, + p_inp1, + p_inp1_shape, + p_inp2, + p_inp2_shape); + } + else + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + WORD32 num_elm = out.numel(); + + WORD32 val = xa_nn_elm_div_f32xf32_f32(p_out, + p_inp1, + p_inp2, + num_elm); + } + } + else + { + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, 
ctx, "div.out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + }); + } + + return out; +} + +Tensor& div_out_mode( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + exec_aten::optional mode, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + + // Allow casting float -> integral here + // non-bool -> bool is still disallowed + ET_KERNEL_CHECK( + ctx, + !(common_type != ScalarType::Bool && out_type == ScalarType::Bool), + InvalidArgument, + out); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = std::floor(value); + } + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + }); + + return out; +} + +Tensor& div_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = isFloatingType(a_type) ? 
a_type : ScalarType::Float; + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.Scalar_out", CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "div.Scalar_out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.Scalar_out", CTYPE, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted](const CTYPE_A val_a) { + CTYPE a_casted = static_cast(val_a); + CTYPE value = a_casted / b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + + return out; +} + +Tensor& div_scalar_mode_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + exec_aten::optional mode, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + if((a.scalar_type() == ScalarType::Float)||(b.scalar_type() == ScalarType::Float)) + { + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool any_is_broadcasted = (a_is_broadcasted || b_is_broadcasted); + + if(any_is_broadcasted) + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + WORD32 p_out_shape[4]; + WORD32 p_inp1_shape[4]; + WORD32 p_inp2_shape[4]; + + for(int i = 0; i < 4; i++) + { + p_inp1_shape[i] = 1; + p_inp2_shape[i] = 1; + p_out_shape[i] = 1; + } + + int off_o = 4 - out.dim(); + int off_a = 4 - a.dim(); + int off_b = 4 - b.dim(); + + for(int i = 0; i < out.dim(); i++) + p_out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + p_inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + p_inp2_shape[i+off_b] = b.size(i); + + WORD32 val = xa_nn_elm_floor_div_broadcast_4D_f32xf32_f32(p_out, + p_out_shape, + p_inp1, + p_inp1_shape, + p_inp2, + p_inp2_shape); + } + else + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + WORD32 num_elm = out.numel(); + + WORD32 val = xa_nn_elm_floor_div_f32xf32_f32(p_out, + p_inp1, + p_inp2, + num_elm); + } + } + else + { + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + constexpr auto name = "div.Scalar_mode_out"; + + ET_SWITCH_REALB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, name, CTYPE, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted, mode](const CTYPE_A val_a) { + CTYPE a_casted = static_cast(val_a); + CTYPE value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() 
&& mode.value() == "floor") { + value = utils::floor_divide(a_casted, b_casted); + } + return value; + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + } + + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c new file mode 100644 index 0000000000..03b8d62518 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c @@ -0,0 +1,419 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +#if HAVE_VFPU +static void internal_elm_div_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_div_broadcast_f32xf32_f32(FLOAT32 * __restrict__ 
p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_div_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_div_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + 
tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c new file mode 100644 index 0000000000..8f419d2272 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c @@ -0,0 +1,495 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +#if !HAVE_VFPU + DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_floor_div_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_floor_div_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_DIV_S(a1, a2); + a = FIFLOOR_S(a); + XT_SSI(a, (xtfloat *)out, 0); + } + + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_floor_div_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; 
+ xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + c0 = FIFLOOR_S(c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + c0 = FIFLOOR_S(c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_floor_div_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? 
p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_floor_div_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_floor_div_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_floor_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ 
p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_floor_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif From f4bf4250c7a891527af2a21a690c4085b6d8c980 Mon Sep 17 00:00:00 2001 From: Rushi-cad Date: Mon, 29 Jul 2024 22:39:32 -0700 Subject: [PATCH 2/4] Adding NNLIB optimized kernel for operator Div and Floor_Div --- backends/cadence/hifi/kernels/kernels.h | 19 +- backends/cadence/hifi/operators/op_div.cpp | 215 ++++++++++----------- 2 files changed, 113 insertions(+), 121 deletions(-) diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index d10e52a77e..54d1914b89 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -14,13 +14,6 @@ /* For NNLIB APIs */ #include "xa_nnlib_kernels_api.h" - - -namespace impl { -namespace HiFi { -namespace kernels { - -void memcpy(void* dst, const void* src, size_t num_bytes); extern "C" WORD32 xa_nn_elm_floor_div_f32xf32_f32( FLOAT32 * __restrict__ p_out, @@ -43,13 +36,13 @@ extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( const WORD32 *const p_inp1_shape, const FLOAT32 * __restrict__ p_inp2, const WORD32 *const p_inp2_shape); + -extern "C" WORD32 xa_nn_elm_div_f32xf32_f32( - FLOAT32 * __restrict__ p_out, - const FLOAT32 * __restrict__ p_inp1, - const FLOAT32 * __restrict__ p_inp2, - WORD32 num_elm); - +namespace impl { +namespace HiFi { +namespace kernels { + +void memcpy(void* dst, const void* src, size_t num_bytes); WORD32 matmul_asym8uxasym8u_asym8u( UWORD8* __restrict__ p_out, // output uint8 matrix diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index b203bf1abb..60634de930 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -171,30 +171,89 @@ Tensor& div_out_mode( !(common_type != ScalarType::Bool && out_type == ScalarType::Bool), InvalidArgument, out); + + if(common_type == ScalarType::Float) + { + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool any_is_broadcasted = (a_is_broadcasted || b_is_broadcasted); - ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { - ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { - ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { - ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { - apply_binary_elementwise_fn( - [mode](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted / b_casted; - if (mode.has_value() && mode.value() == "trunc") { - value = std::trunc(value); - } else if (mode.has_value() && mode.value() == "floor") { - value = std::floor(value); - } - return static_cast(value); - }, - a, - b, - out); + if(any_is_broadcasted) + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * 
__restrict__)b.const_data_ptr(); + + WORD32 p_out_shape[4]; + WORD32 p_inp1_shape[4]; + WORD32 p_inp2_shape[4]; + + for(int i = 0; i < 4; i++) + { + p_inp1_shape[i] = 1; + p_inp2_shape[i] = 1; + p_out_shape[i] = 1; + } + + int off_o = 4 - out.dim(); + int off_a = 4 - a.dim(); + int off_b = 4 - b.dim(); + + for(int i = 0; i < out.dim(); i++) + p_out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + p_inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + p_inp2_shape[i+off_b] = b.size(i); + + WORD32 val = xa_nn_elm_floor_div_broadcast_4D_f32xf32_f32(p_out, + p_out_shape, + p_inp1, + p_inp1_shape, + p_inp2, + p_inp2_shape); + } + else + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + WORD32 num_elm = out.numel(); + + WORD32 val = xa_nn_elm_floor_div_f32xf32_f32(p_out, + p_inp1, + p_inp2, + num_elm); + } + } + else + { + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = std::floor(value); + } + return static_cast(value); + }, + a, + b, + out); + }); }); }); }); - }); + } return out; } @@ -259,100 +318,40 @@ Tensor& div_scalar_mode_out( InvalidArgument, out, "Failed to resize output tensor."); - - if((a.scalar_type() == ScalarType::Float)||(b.scalar_type() == ScalarType::Float)) - { - - const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); - const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); - const bool any_is_broadcasted = (a_is_broadcasted || b_is_broadcasted); - - if(any_is_broadcasted) - { - FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); - const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); - const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); - - WORD32 p_out_shape[4]; - WORD32 p_inp1_shape[4]; - WORD32 p_inp2_shape[4]; - - for(int i = 0; i < 4; i++) - { - p_inp1_shape[i] = 1; - p_inp2_shape[i] = 1; - p_out_shape[i] = 1; - } - - int off_o = 4 - out.dim(); - int off_a = 4 - a.dim(); - int off_b = 4 - b.dim(); - - for(int i = 0; i < out.dim(); i++) - p_out_shape[i+off_o] = out.size(i); - for(int i = 0; i < a.dim(); i++) - p_inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b.dim(); i++) - p_inp2_shape[i+off_b] = b.size(i); - - WORD32 val = xa_nn_elm_floor_div_broadcast_4D_f32xf32_f32(p_out, - p_out_shape, - p_inp1, - p_inp1_shape, - p_inp2, - p_inp2_shape); - } - else - { - FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); - const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); - const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); - - WORD32 num_elm = out.numel(); - - WORD32 val = 
xa_nn_elm_floor_div_f32xf32_f32(p_out, - p_inp1, - p_inp2, - num_elm); - } - } - else - { - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType common_type = utils::promote_type_with_scalar(a_type, b); - ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); - constexpr auto name = "div.Scalar_mode_out"; + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - ET_SWITCH_REALB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - ET_SWITCH_REAL_TYPES(out_type, ctx, name, CTYPE, [&]() { - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE b_casted = static_cast(b_val); + constexpr auto name = "div.Scalar_mode_out"; - apply_unary_map_fn( - [b_casted, mode](const CTYPE_A val_a) { - CTYPE a_casted = static_cast(val_a); - CTYPE value = a_casted / b_casted; - if (mode.has_value() && mode.value() == "trunc") { - value = std::trunc(value); - } else if (mode.has_value() && mode.value() == "floor") { - value = utils::floor_divide(a_casted, b_casted); - } - return value; - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); + ET_SWITCH_REALB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, name, CTYPE, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted, mode](const CTYPE_A val_a) { + CTYPE a_casted = static_cast(val_a); + CTYPE value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = utils::floor_divide(a_casted, b_casted); + } + return value; + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); }); }); - } - + }); return out; } From 87aa38919661f0712d8673228da943da76d1a596 Mon Sep 17 00:00:00 2001 From: Rushi-cad Date: Tue, 30 Jul 2024 00:42:33 -0700 Subject: [PATCH 3/4] Added macro --- backends/cadence/hifi/operators/op_div.cpp | 30 ++++++++++++---------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index 60634de930..536d2105eb 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -19,6 +19,8 @@ namespace torch { namespace executor { namespace native { + +#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ namespace { @@ -71,20 +73,20 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); - WORD32 p_out_shape[4]; - WORD32 p_inp1_shape[4]; - WORD32 p_inp2_shape[4]; + WORD32 p_out_shape[NNLIB_MAX_DIM]; + WORD32 p_inp1_shape[NNLIB_MAX_DIM]; + WORD32 p_inp2_shape[NNLIB_MAX_DIM]; - for(int i = 0; i < 4; i++) + for(int i = 0; i < NNLIB_MAX_DIM; i++) { p_inp1_shape[i] = 1; p_inp2_shape[i] = 1; p_out_shape[i] = 1; } - int off_o = 4 - out.dim(); - int off_a = 4 - a.dim(); - int off_b = 4 - b.dim(); + int off_o = 
NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); for(int i = 0; i < out.dim(); i++) p_out_shape[i+off_o] = out.size(i); @@ -185,20 +187,20 @@ Tensor& div_out_mode( const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); - WORD32 p_out_shape[4]; - WORD32 p_inp1_shape[4]; - WORD32 p_inp2_shape[4]; + WORD32 p_out_shape[NNLIB_MAX_DIM]; + WORD32 p_inp1_shape[NNLIB_MAX_DIM]; + WORD32 p_inp2_shape[NNLIB_MAX_DIM]; - for(int i = 0; i < 4; i++) + for(int i = 0; i < NNLIB_MAX_DIM; i++) { p_inp1_shape[i] = 1; p_inp2_shape[i] = 1; p_out_shape[i] = 1; } - int off_o = 4 - out.dim(); - int off_a = 4 - a.dim(); - int off_b = 4 - b.dim(); + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); for(int i = 0; i < out.dim(); i++) p_out_shape[i+off_o] = out.size(i); From ba660c1bb9589978bdc7edcd7582cc70cbd1be37 Mon Sep 17 00:00:00 2001 From: Rushi-cad Date: Tue, 30 Jul 2024 04:16:44 -0700 Subject: [PATCH 4/4] Integrating NNLIB optimization for rsqrt op --- backends/cadence/aot/functions_hifi.yaml | 5 ++ .../cadence/hifi/operators/CMakeLists.txt | 4 +- backends/cadence/hifi/operators/op_rsqrt.cpp | 50 +++++++++++++++++++ 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 backends/cadence/hifi/operators/op_rsqrt.cpp diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index f9d75ef56b..492456ce98 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -101,6 +101,11 @@ kernels: - arg_meta: null kernel_name: torch::executor::where_out + +- op: rsqrt.out + kernels: + - arg_meta: null + kernel_name: torch::executor::rsqrt_out # custom ops - func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index a05dd61dc3..bf533bf887 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -36,6 +36,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_rsqrt.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_mul.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp" @@ -44,7 +45,8 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sub.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp") + "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp") add_library(aten_ops_cadence ${_aten_ops__srcs}) target_link_libraries(aten_ops_cadence PUBLIC executorch) target_link_libraries(aten_ops_cadence PRIVATE cadence_kernels) diff --git a/backends/cadence/hifi/operators/op_rsqrt.cpp b/backends/cadence/hifi/operators/op_rsqrt.cpp new file mode 100644 index 0000000000..e5e60bf783 --- /dev/null +++ b/backends/cadence/hifi/operators/op_rsqrt.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include + +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { +namespace { + +double rsqrt(double x) { + return 1.0 / std::sqrt(x); +} + +} // namespace + +Tensor& rsqrt_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { + + if(in.scalar_type() == ScalarType::Float) + { + WORD32 num_elm = out.numel(); + + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp = (const FLOAT32 * __restrict__)in.const_data_ptr(); + + xa_nn_elm_rsqrt_f32_f32( + p_out, + p_inp, + num_elm); + + return out; + } + else + { + return internal::unary_ufunc_realhb_to_floath(rsqrt, ctx, in, out); + } + +} + +} // namespace native +} // namespace executor +} // namespace torch
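
Note (not part of the patch series above): each float fast path in op_div.cpp prepares its operands for the 4D NNLIB broadcast kernels by right-aligning every tensor shape into a fixed array of NNLIB_MAX_DIM entries, filling the leading positions with 1. The following is a minimal standalone sketch of just that preparation step, with no ExecuTorch or NNLIB dependencies; the helper name pad_shape_to_4d is hypothetical and chosen only for illustration.

#include <array>
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr int kNnlibMaxDim = 4;  // mirrors NNLIB_MAX_DIM in op_div.cpp

// Right-align a shape of rank `dim` into a 4-element array, padding the
// leading entries with 1 so broadcasting semantics are preserved when the
// shape is handed to a fixed-rank 4D kernel.
std::array<int32_t, kNnlibMaxDim> pad_shape_to_4d(const int32_t* sizes, int dim) {
  assert(dim >= 0 && dim <= kNnlibMaxDim);
  std::array<int32_t, kNnlibMaxDim> padded;
  padded.fill(1);
  const int offset = kNnlibMaxDim - dim;
  for (int i = 0; i < dim; i++) {
    padded[i + offset] = sizes[i];
  }
  return padded;
}

int main() {
  // A rank-2 tensor of shape [3, 5] becomes [1, 1, 3, 5] for the 4D kernel.
  const int32_t sizes[] = {3, 5};
  const auto padded = pad_shape_to_4d(sizes, 2);
  for (int32_t s : padded) {
    std::printf("%d ", s);  // prints: 1 1 3 5
  }
  std::printf("\n");
  return 0;
}

In the patch this padding is done with the same loop in div_out, div_out_mode, and div_scalar_mode_out before calling xa_nn_elm_div_broadcast_4D_f32xf32_f32 or xa_nn_elm_floor_div_broadcast_4D_f32xf32_f32; the non-broadcast case skips the padding and passes out.numel() to the flat f32xf32 kernels instead.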