diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml
index e7dde55684..84205afe32 100644
--- a/backends/cadence/aot/functions_hifi.yaml
+++ b/backends/cadence/aot/functions_hifi.yaml
@@ -37,6 +37,11 @@
     - arg_meta: null
       kernel_name: torch::executor::cat_out
+- op: clamp.Tensor_out
+  kernels:
+    - arg_meta: null
+      kernel_name: torch::executor::clamp_tensor_out
+
 - op: clone.out
   kernels:
     - arg_meta: null
       kernel_name: torch::executor::clone_out
@@ -62,6 +67,16 @@
     - arg_meta: null
       kernel_name: torch::executor::full_out
+- op: maximum.out
+  kernels:
+    - arg_meta: null
+      kernel_name: torch::executor::maximum_out
+
+- op: minimum.out
+  kernels:
+    - arg_meta: null
+      kernel_name: torch::executor::minimum_out
+
 - op: mul.out
   kernels:
     - arg_meta: null
       kernel_name: torch::executor::mul_out
diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt
index 989aa9ecf2..afec8fb3e0 100644
--- a/backends/cadence/hifi/kernels/CMakeLists.txt
+++ b/backends/cadence/hifi/kernels/CMakeLists.txt
@@ -9,10 +9,13 @@ add_library(
   cadence_kernels
   kernels.cpp
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp
+  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32_32.c
+  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
+  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_clamp_f32.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c
 )
diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h
index 15b576ed78..7816cf6588 100644
--- a/backends/cadence/hifi/kernels/kernels.h
+++ b/backends/cadence/hifi/kernels/kernels.h
@@ -15,6 +15,58 @@
 /* For NNLIB APIs */
 #include "xa_nnlib_kernels_api.h"
+extern "C" WORD32 xa_nn_broadcast_32_32( WORD32* __restrict__ p_out, /* pointer to write broadcasted output data to */
+                    const int *const out_shape,  /* output shape resulting after broadcast */
+
+                    WORD32* __restrict__ p_in,   /* pointer to unextended input data */
+                    const int * const in_shape,  /* input shape */
+                    int num_dims);
+
+extern "C" WORD32 xa_nn_elm_clamp_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const FLOAT32 * __restrict__ p_inp,
+                    const FLOAT32 * __restrict__ p_min,
+                    const FLOAT32 * __restrict__ p_max,
+                    WORD32 num_elm);
+
+extern "C" WORD32 xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const WORD32 *const p_out_shape,
+                    const FLOAT32 * __restrict__ p_inp,
+                    const WORD32 *const p_inp_shape,
+                    const FLOAT32 * __restrict__ p_min,
+                    const WORD32 *const p_min_shape,
+                    const FLOAT32 * __restrict__ p_max,
+                    const WORD32 *const p_max_shape
+                    );
+
+extern "C" WORD32 xa_nn_elm_maximum_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const FLOAT32 * __restrict__ p_inp1,
+                    const FLOAT32 * __restrict__ p_inp2,
+                    WORD32 num_elm);
+
+extern "C" WORD32 xa_nn_elm_minimum_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const FLOAT32 * __restrict__ p_inp1,
+                    const FLOAT32 * __restrict__ p_inp2,
+                    WORD32 num_elm);
+
+extern "C" WORD32 xa_nn_elm_where_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const FLOAT32 * __restrict__ p_inp1,
+                    const FLOAT32 * __restrict__ p_inp2,
+                    const unsigned char *__restrict__ p_condition,
+                    WORD32 num_elm);
+
+extern "C" WORD32 xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const WORD32 *const p_out_shape,
+                    const FLOAT32 * __restrict__ p_inp1,
+                    const WORD32 *const p_inp1_shape,
+                    const FLOAT32 * __restrict__ p_inp2,
+                    const WORD32 *const p_inp2_shape);
+
+extern "C" WORD32 xa_nn_elm_minimum_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                    const WORD32 *const p_out_shape,
+                    const FLOAT32 * __restrict__ p_inp1,
+                    const WORD32 *const p_inp1_shape,
+                    const FLOAT32 * __restrict__ p_inp2,
+                    const WORD32 *const p_inp2_shape);
 extern "C" WORD32 xa_nn_elm_floor_div_f32xf32_f32(
     FLOAT32 * __restrict__ p_out,
@@ -39,7 +91,7 @@ extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32(
     const WORD32 *const p_inp2_shape);
 extern "C" WORD32 xa_nn_elm_where_f32xf32_f32(FLOAT32 * __restrict__ p_out, const FLOAT32 * __restrict__ p_inp1, const FLOAT32 * __restrict__ p_inp2, const unsigned char *__restrict__ p_condition, WORD32 num_elm);
-
+
 extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
     const WORD32 *const p_out_shape,
     const FLOAT32 * __restrict__ p_inp1,
     const WORD32 *const p_inp1_shape,
diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt
index fc63eba88e..4056958e3d 100644
--- a/backends/cadence/hifi/operators/CMakeLists.txt
+++ b/backends/cadence/hifi/operators/CMakeLists.txt
@@ -20,6 +20,9 @@ endif()
 # ATen compliant ops that are needed to run this model.
 set(_aten_ops__srcs
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_clamp.cpp"
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_maximum.cpp"
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_minimum.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp"
diff --git a/backends/cadence/hifi/operators/op_clamp.cpp b/backends/cadence/hifi/operators/op_clamp.cpp
new file mode 100644
index 0000000000..00b7a2dca9
--- /dev/null
+++ b/backends/cadence/hifi/operators/op_clamp.cpp
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "kernels.h" + +#define NNLIB_MAX_DIM 4 + +namespace torch { +namespace executor { +namespace native { + +using Scalar = exec_aten::Scalar; +using ScalarType = exec_aten::ScalarType; +using Tensor = exec_aten::Tensor; + +namespace { + +template +/** Check if val, when cast to CTYPE_CAST, is not in the range of CTYPE_OUT */ +bool is_out_of_bounds(CTYPE_VAL val) { + const CTYPE_CAST val_cast = static_cast(val); + return val_cast < std::numeric_limits::lowest() || + val_cast > std::numeric_limits::max(); +} + +__ET_NODISCARD bool check_bounds( + const Scalar& val_scalar, + const torch::executor::native::ScalarType& val_type, + const torch::executor::native::ScalarType& out_type, + const char* val_name) { + auto is_valid = true; + + ET_SWITCH_SCALAR_OBJ_TYPES(val_type, ctx, "clamp.out", CTYPE_VAL, [&]() { + CTYPE_VAL val = 0; + utils::extract_scalar(val_scalar, &val); + if (isIntegralType(out_type, /*includeBool=*/false)) { + ET_SWITCH_INT_TYPES(out_type, ctx, "clamp.out", CTYPE_OUT, [&]() { + if (is_out_of_bounds(val)) { + ET_LOG(Error, "%s value out of bounds", val_name); + is_valid = false; + } + }); + } else if (isFloatingType(out_type)) { + ET_SWITCH_FLOATH_TYPES(out_type, ctx, "clamp", CTYPE_OUT, [&]() { + if (std::isfinite(val) && + is_out_of_bounds(val)) { + ET_LOG(Error, "%s value out of bounds", val_name); + is_valid = false; + } + }); + } + }); + + return is_valid; +} + +} // namespace + +Tensor& clamp_out( + RuntimeContext& ctx, + const Tensor& in, + const exec_aten::optional& min_opt, + const exec_aten::optional& max_opt, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, in.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType in_type = in.scalar_type(); + ScalarType min_type = in_type; + ScalarType max_type = in_type; + ScalarType common_type = in_type; + ScalarType out_type = out.scalar_type(); + + bool has_min = min_opt.has_value(); + if (has_min) { + min_type = utils::get_scalar_dtype(min_opt.value()); + common_type = utils::promote_type_with_scalar(common_type, min_opt.value()); + ET_KERNEL_CHECK( + ctx, + check_bounds(min_opt.value(), min_type, out_type, "minimum"), + InvalidArgument, + out); + } + bool has_max = max_opt.has_value(); + if (has_max) { + max_type = utils::get_scalar_dtype(max_opt.value()); + common_type = utils::promote_type_with_scalar(common_type, max_opt.value()); + ET_KERNEL_CHECK( + ctx, + check_bounds(max_opt.value(), max_type, out_type, "maximum"), + InvalidArgument, + out); + } + + ET_KERNEL_CHECK_MSG( + ctx, + has_min || has_max, + InvalidArgument, + out, + "At least one of 'min' or 'max' must not be None"); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + ET_SWITCH_REALH_TYPES(out_type, ctx, "clamp", CTYPE_OUT, [&]() { + // Extract optional min value + CTYPE_OUT min = 0; + if (has_min) { + ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "clamp", CTYPE_MIN, [&]() { + CTYPE_MIN min_val = 0; + utils::extract_scalar(min_opt.value(), &min_val); + min = static_cast(min_val); + }); + } + + // Extract optional max value + CTYPE_OUT max = 0; + if (has_max) { + ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "clamp", CTYPE_MAX, [&]() { + CTYPE_MAX max_val = 0; + utils::extract_scalar(max_opt.value(), &max_val); + max = static_cast(max_val); + }); + } + + ET_SWITCH_REALHB_TYPES(in_type, ctx, "clamp", CTYPE_IN, [&]() { + apply_unary_map_fn( + 
[has_min, min, has_max, max](const CTYPE_IN val_in) { + CTYPE_OUT val_out = static_cast(val_in); + if (has_min) { + val_out = utils::max_override(val_out, min); + } + if (has_max) { + val_out = utils::min_override(val_out, max); + } + return val_out; + }, + in.const_data_ptr(), + out.mutable_data_ptr(), + in.numel()); + }); + }); + + return out; +} + +Tensor& clamp_tensor_out( + RuntimeContext& ctx, + const Tensor& in, + const exec_aten::optional& min_opt, + const exec_aten::optional& max_opt, + Tensor& out) { + (void)ctx; + + bool has_min = min_opt.has_value(); + bool has_max = max_opt.has_value(); + + ET_KERNEL_CHECK_MSG( + ctx, + has_min || has_max, + InvalidArgument, + out, + "At least one of 'min' or 'max' must not be None"); + + const Tensor& min = has_min ? min_opt.value() : in; + const Tensor& max = has_max ? max_opt.value() : in; + + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(in, min, max, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType in_type = in.scalar_type(); + ScalarType min_type = min.scalar_type(); + ScalarType max_type = max.scalar_type(); + ScalarType common_type = in_type; + ScalarType out_type = out.scalar_type(); + + if (has_min) { + common_type = promoteTypes(common_type, min_type, /*half_to_float*/ true); + } + if (has_max) { + common_type = promoteTypes(common_type, max_type, /*half_to_float*/ true); + } + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + const int in_is_broadcasted = !out.sizes().equals(in.sizes()); + const int min_is_broadcasted = !out.sizes().equals(min.sizes()); + const int max_is_broadcasted = !out.sizes().equals(max.sizes()); + const int broadcast = (in_is_broadcasted || min_is_broadcasted || max_is_broadcasted); + + int max_dim = in.dim() > min.dim() ? in.dim() : min.dim(); + max_dim = max.dim() > max_dim ? max.dim() : max_dim; + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + bool fall_back = 0; + if((in_type != ScalarType::Float) || (min_type != ScalarType::Float) || (max_type != ScalarType::Float)) + fall_back = 1; + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + if(fall_back) + { + if(!has_min) + { + const float* const max_data = max.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp_shape[NNLIB_MAX_DIM]; + int max_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp_shape[i] = 1; + max_shape[i] = 1; + } + + int max_dim = max.dim(), inp_dim = in.dim(), out_dim = out.dim(); + int off_o = NNLIB_MAX_DIM - out_dim; + int off_max = NNLIB_MAX_DIM - max_dim; + int off_inp = NNLIB_MAX_DIM - inp_dim; + for(int i = 0; i < out_dim; i++) + { + out_shape[i+off_o] = out.size(i); + } + for(int i = 0; i < max_dim; i++) + { + max_shape[i+off_max] = max.size(i); + } + for(int i = 0; i < inp_dim; i++) + { + inp_shape[i+off_inp] = in.size(i); + } + + xa_nn_elm_minimum_broadcast_4D_f32xf32_f32(out_data, out_shape, inp_data, inp_shape, max_data, max_shape); + } + else + { + xa_nn_elm_minimum_f32xf32_f32(out_data, inp_data, max_data, out.numel()); + } + } + else if(!has_max) + { + const float* const min_data = min.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp_shape[NNLIB_MAX_DIM]; + int min_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp_shape[i] = 1; + min_shape[i] = 1; + } + + int min_dim = min.dim(), max_dim = max.dim(), inp_dim = in.dim(), out_dim = out.dim(); + int off_o = NNLIB_MAX_DIM - out_dim; + int off_min = NNLIB_MAX_DIM - min_dim; + int off_inp = NNLIB_MAX_DIM - inp_dim; + for(int i = 0; i < out_dim; i++) + { + out_shape[i+off_o] = out.size(i); + } + for(int i = 0; i < min_dim; i++) + { + min_shape[i+off_min] = min.size(i); + } + for(int i = 0; i < inp_dim; i++) + { + inp_shape[i+off_inp] = in.size(i); + } + xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(out_data, out_shape, inp_data, inp_shape, min_data, min_shape); + } + else + { + xa_nn_elm_maximum_f32xf32_f32(out_data, inp_data, min_data, out.numel()); + } + } + else + { + const float* const min_data = min.const_data_ptr(); + const float* const max_data = max.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp_shape[NNLIB_MAX_DIM]; + int min_shape[NNLIB_MAX_DIM]; + int max_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp_shape[i] = 1; + min_shape[i] = 1; + max_shape[i] = 1; + } + + int min_dim = min.dim(), max_dim = max.dim(), inp_dim = in.dim(), out_dim = out.dim(); + int off_o = NNLIB_MAX_DIM - out_dim; + int off_min = NNLIB_MAX_DIM - min_dim; + int off_max = NNLIB_MAX_DIM - max_dim; + int off_inp = NNLIB_MAX_DIM - inp_dim; + for(int i = 0; i < out_dim; i++) + { + out_shape[i+off_o] = out.size(i); + } + for(int i = 0; i < min_dim; i++) + { + min_shape[i+off_min] = min.size(i); + } + + for(int i = 0; i < max_dim; i++) + { + max_shape[i+off_max] = max.size(i); + } + for(int i = 0; i < inp_dim; i++) + { + inp_shape[i+off_inp] = in.size(i); + } + /* Add fallback if output and input dimension are larger than min and max dimension, 
this code doesn't support that*/ + if(inp_shape[0] != out_shape[0] || inp_shape[1] != out_shape[1] || inp_shape[2] != out_shape[2] || inp_shape[3] != out_shape[3]) + { + void* p_scratch = malloc(out_shape[0]*out_shape[1]*out_shape[2]*out_shape[3]); + const FLOAT32 *p_brd_cond = (const FLOAT32 *)p_scratch; + xa_nn_broadcast_32_32((WORD32*) p_brd_cond, out_shape, (WORD32*) inp_data, inp_shape, 4); + + for(int i = 0; i < 4; i++) + { + inp_shape[i] = out_shape[i]; + } + + xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32(out_data, out_shape, p_brd_cond, inp_shape, min_data, min_shape, max_data, max_shape); + free(p_scratch); + } + else + { + xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32(out_data, out_shape, inp_data, inp_shape, min_data, min_shape, max_data, max_shape); + } + } + else + { + xa_nn_elm_clamp_f32xf32xf32_f32(out_data, inp_data, min_data, max_data, out.numel()); + } + } + } + else + { + constexpr auto name = "clamp.Tensor_out"; + + ET_SWITCH_REALHB_TYPES(in_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REALHB_TYPES(min_type, ctx, name, CTYPE_MIN, [&]() { + ET_SWITCH_REALHB_TYPES(max_type, ctx, name, CTYPE_MAX, [&]() { + ET_SWITCH_REALHB_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + apply_ternary_elementwise_fn< + CTYPE_IN, + CTYPE_MIN, + CTYPE_MAX, + CTYPE_OUT>( + [has_min, has_max]( + const CTYPE_IN val_in, + const CTYPE_MIN val_min, + const CTYPE_MAX val_max) { + CTYPE_OUT val_out = static_cast(val_in); + if (has_min) { + val_out = utils::max_override( + val_out, static_cast(val_min)); + } + if (has_max) { + val_out = utils::min_override( + val_out, static_cast(val_max)); + } + return val_out; + }, + in, + min, + max, + out); + }); + }); + }); + }); + } + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_maximum.cpp b/backends/cadence/hifi/operators/op_maximum.cpp new file mode 100644 index 0000000000..4b2ca55c86 --- /dev/null +++ b/backends/cadence/hifi/operators/op_maximum.cpp @@ -0,0 +1,167 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include "kernels.h" + +#define NNLIB_MAX_DIM 4 + +namespace torch { +namespace executor { +namespace native { +namespace { + +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MaximumInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MaximumInner { + static void run(const Tensor& a, const Tensor& b, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = utils::max_override(a_casted, b_casted); + + return static_cast(value); + }, + a, + b, + out); + } +}; + +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MaximumInner + : public ReportCanCastBug {}; + +} // namespace + +Tensor& maximum_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + bool fall_back = 0; + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + fall_back = 1; + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + if(!fall_back) + { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + if(broadcast == 1) + { + int out_shape[4]; + int inp1_shape[4]; + int inp2_shape[4]; + + for(int i = 0; i < 4; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + + + int max = (a.dim() > b.dim()) ? a.dim() : b.dim(); + max = (max > out.dim()) ? 
max : out.dim(); + + int off_o = max - out.dim(); + int off_a = max - a.dim(); + int off_b = max - b.dim(); + + for(int i = 0; i < out.dim(); i++){ + out_shape[i+off_o] = out.size(i);} + + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { + xa_nn_elm_maximum_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + } + else + { + ET_SWITCH_REALHB_TYPES(a_type, ctx, "maximum.out", CTYPE_A, [&]() { + ET_SWITCH_REALHB_TYPES(b_type, ctx, "maximum.out", CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REALHB_TYPES(out_type, ctx, "maximum.out", CTYPE_OUT, [&]() { + MaximumInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out); + }); + }); + }); + } + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_minimum.cpp b/backends/cadence/hifi/operators/op_minimum.cpp new file mode 100644 index 0000000000..632162b2c7 --- /dev/null +++ b/backends/cadence/hifi/operators/op_minimum.cpp @@ -0,0 +1,162 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include "kernels.h" + +#define NNLIB_MAX_DIM 4 + +namespace torch { +namespace executor { +namespace native { +namespace { + +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MinimumInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MinimumInner { + static void run(const Tensor& a, const Tensor& b, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = utils::min_override(a_casted, b_casted); + + return static_cast(value); + }, + a, + b, + out); + } +}; + +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MinimumInner + : public ReportCanCastBug {}; + +} // namespace + +Tensor& minimum_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + bool fall_back = 0; + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + fall_back = 1; + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + if(!fall_back) + { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); + + for(int i = 0; i < out.dim(); i++){ + out_shape[i+off_o] = out.size(i);} + + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_minimum_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { + xa_nn_elm_minimum_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + } + else + { + ET_SWITCH_REALHB_TYPES(a_type, ctx, "minimum.out", CTYPE_A, [&]() { + ET_SWITCH_REALHB_TYPES(b_type, ctx, "minimum.out", CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REALHB_TYPES(out_type, ctx, "minimum.out", CTYPE_OUT, [&]() { + MinimumInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out); + }); + }); + }); + } + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_where.cpp b/backends/cadence/hifi/operators/op_where.cpp index 6370a1094a..64c7fa42e2 100644 --- a/backends/cadence/hifi/operators/op_where.cpp +++ b/backends/cadence/hifi/operators/op_where.cpp @@ -52,10 +52,11 @@ Tensor& where_out( const int broadcast = (a_is_broadcasted || b_is_broadcasted || cond_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = cond.dim() > max_dim ? cond.dim() : max_dim; max_dim = out.dim() > max_dim ? out.dim() : max_dim; bool fall_back = 0; - if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) - fall_back = 1; + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float) || (cond_type != ScalarType::Float)) + fall_back = 1; if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) fall_back = 1; @@ -97,7 +98,7 @@ if(!fall_back) for(int i = 0; i < cond.dim(); i++) con_shape[i+off_c] = cond.size(i); - /* Add fallback if broadcast and condition dimension are larger than inputs dimension */ + /* Add fallback if broadcast and condition dimension are larger than inputs dimension, this code doesn't support that*/ if(con_shape[0] != out_shape[0] || con_shape[1] != out_shape[1] || con_shape[2] != out_shape[2] || con_shape[3] != out_shape[3]) { diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32_32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32_32.c new file mode 100644 index 0000000000..34a7111ee7 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32_32.c @@ -0,0 +1,313 @@ +/******************************************************************************* +* Copyright (c) 2018-2024 Cadence Design Systems, Inc. 
+* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +******************************************************************************/ +/* + * xa_nn_broadcast_32_32.c + */ + +#include "xa_nnlib_common.h" +//#include "xa_nn_basic_state.h" + +#include +#include + +#include "stdio.h" + +/* + * This file is sourced from ../hifi5/xa_nn_broadcast_8_8.c + */ + +#define NUMDIMS_MAX 8 + +typedef struct bcast_expansion_struct_{ + size_t load_num_elem; + int replicate_loadedElm_times; + int repeat_operation; +} bcast_expansion_rule ; + +WORD32* broadcast_node_32(bcast_expansion_rule *steps, unsigned int step_id, + WORD32 *dst, WORD32 *src); + +void *xa_nn_memcpy(void * dest1,const void *src1, size_t n1) +{ + char *dest = (char *)dest1; + char *src = (char *)src1; + int n = (int)n1; + ae_int16x4 * __restrict d_align_addr, * __restrict s_align_addr; + int i; + void *orig_dest = dest; + + if (n < 32) { + return memcpy(dest, src, n); + } + + if ( !(((int) dest) %8) && !(((int) src) %8)) { // 64-bit aligned + s_align_addr = (ae_int16x4 *) src; + d_align_addr = (ae_int16x4 *) dest; + for (i=0; i>3; i++) { + d_align_addr[i] = s_align_addr[i]; + } + + for (i=(n&~7); i>3; i++) { + AE_LA16X4_IP(t, s_align, s_align_addr); + AE_LA16X4_IP(t2, s_align, s_align_addr); + AE_SA16X4_IP(t, d_align, d_align_addr); + AE_SA16X4_IP(t2, d_align, d_align_addr); + } + AE_SA64POS_FP(d_align, d_align_addr); + ae_int16 *s_src = (ae_int16 *) src; + ae_int16 *s_dest = (ae_int16 *) dest; + for (i=8*i; i8, -1); + + int i = 0; + + /* Check for valid IO shapes */ + for(i=0; i=0){ + + /* Find the sub-matrix size */ + while(in_shape[dim] != 1 && dim>=0){ + num_elem_load *= out_shape[dim]; + dim--; + } + + /* Find the number of times this sub-matrix needs to be copied */ + num_copy_times = 1; + while(in_shape[dim] == 1 && dim>=0){ + num_copy_times *= out_shape[dim]; + dim--; + } + + /* Find the number of times the above copy needs to be repeated */ + num_repeat = 1; + while(in_shape[dim] != 1 && dim>=0){ + num_repeat *= 1 * out_shape[dim]; + dim--; + } + + bcast_expansion_steps[k].load_num_elem = num_elem_load; + bcast_expansion_steps[k].replicate_loadedElm_times = num_copy_times; + bcast_expansion_steps[k].repeat_operation = num_repeat; + k++; + + num_elem_load = num_elem_load * num_copy_times * num_repeat; + } + + res = broadcast_node_32(bcast_expansion_steps, num_dims-1, + p_out, p_in); + (void)res; /* Unused return value */ + + return 0; +} + +WORD32* broadcast_node_32(bcast_expansion_rule *steps, unsigned int step_id, + WORD32 *dst, WORD32 *src) { + int step_itr=0, rep_itr=0; + int i=0, j=0, k=0; + bcast_expansion_rule *step = NULL; + + // ignore steps that are null + 
while(steps[step_id].repeat_operation == 0 && step_id>0){ + step_id--; + } + + // step is now the parent node for this iteration + step = &steps[step_id]; + size_t numLoadedElm = step->load_num_elem; + + WORD32 *cp_dst = dst; + WORD32 *cp_src = src; + WORD32 *cp_src_temp=NULL; + WORD32 *cp_dst_temp=NULL; + + if(numLoadedElm>32){ + if(step_id > 0){ + for(step_itr=0; step_itrrepeat_operation; step_itr++){ + src = broadcast_node_32(steps, step_id-1, dst, src); + cp_src = dst; + cp_dst = dst + numLoadedElm; + for(rep_itr=1; rep_itrreplicate_loadedElm_times; rep_itr++){ + xa_nn_memcpy(cp_dst, cp_src, 4 * numLoadedElm); + cp_dst += numLoadedElm; + } + dst = cp_dst; + } + return src; + } else { + if(numLoadedElm == 1){ + for(j=0; jrepeat_operation; j++){ +// memset((void*)cp_dst, (void*)cp_src, 4 * step->replicate_loadedElm_times); + for(i = 0; i < step->replicate_loadedElm_times; i++) + cp_dst[i] = cp_src[0]; + cp_dst += step->replicate_loadedElm_times; + cp_src++; + } + } else { + for(j=0; jrepeat_operation; j++){ + for(i=0; ireplicate_loadedElm_times; i++){ + xa_nn_memcpy(cp_dst, cp_src, 4 * numLoadedElm); + cp_dst += numLoadedElm; + } + cp_src += numLoadedElm; + } + } + return cp_src; + } + } + else{ + if(step_id > 0){ + for(step_itr=0; step_itrrepeat_operation; step_itr++){ + src = broadcast_node_32(steps, step_id-1, dst, src); + cp_src = dst; + cp_dst = dst + numLoadedElm; + for(rep_itr=1; rep_itrreplicate_loadedElm_times; rep_itr++){ + for(k=0; k<(int)numLoadedElm; k++){ + cp_src_temp = cp_src; + cp_dst_temp = cp_dst; + cp_dst_temp[k] = cp_src_temp[k]; + } + cp_dst += numLoadedElm; + } + dst = cp_dst; + } + return src; + } else { + if(numLoadedElm == 1){ + for(j=0; jrepeat_operation; j++){ +// memset((void*)cp_dst, *(WORD32 *)cp_src, 4 * step->replicate_loadedElm_times); + for(i = 0; i < step->replicate_loadedElm_times; i++) + cp_dst[i] = cp_src[0]; + cp_dst += step->replicate_loadedElm_times; + cp_src++; + } + } else { + for(j=0; j < step->repeat_operation; j++){ + for(i=0; i < step->replicate_loadedElm_times; i++){ + for(k=0; k<(int)(numLoadedElm); k++){ + cp_src_temp = cp_src; + cp_dst_temp = cp_dst; + cp_dst_temp[k] = cp_src_temp[k]; + + } + cp_dst += numLoadedElm; + } + cp_src += numLoadedElm; + } + } + return cp_src; + } + } +} diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_clamp_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_clamp_f32.c new file mode 100644 index 0000000000..d957c1f361 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_clamp_f32.c @@ -0,0 +1,560 @@ +/******************************************************************************* +* Copyright (c) 2018-2024 Cadence Design Systems, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +******************************************************************************/ +#include "nnlib-hifi4/xa_nnlib/include/xa_type_def.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_common_fpu.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nn_common.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_err_chk.h" +#include "nnlib-hifi4/xa_nnlib/algo/kernels/basic/hifi4/xa_nn_basic_state.h" +#include "nnlib-hifi4/xa_nnlib/include/nnlib/xa_nnlib_kernels_api.h" + + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_clamp_f32xf32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp, + const FLOAT32 *p_min, + const FLOAT32 *p_max, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_clamp_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp, + const FLOAT32 * __restrict__ p_min, + const FLOAT32 * __restrict__ p_max, + WORD32 num_elm) +{ + + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp, -1); + XA_NNLIB_ARG_CHK_PTR(p_min, -1); + XA_NNLIB_ARG_CHK_PTR(p_max, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_min, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_max, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp = (xtfloatx2 *)p_inp; + xtfloatx2 *min = (xtfloatx2 *)p_min; + xtfloatx2 *max = (xtfloatx2 *)p_max; + xtfloatx2 *out = (xtfloatx2 *)p_out; + + xtfloatx2 x1, d_min, d_max, y; + + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp)&7) == 0) && ((((unsigned)p_min)&7) == 0) && ((((unsigned)p_max)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp, 2*sizeof(FLOAT32)); + XT_LSX2IP(d_min, min, 2*sizeof(FLOAT32)); + XT_LSX2IP(d_max, max, 2*sizeof(FLOAT32)); + + y = XT_MAX_SX2(x1, d_min); + y = XT_MIN_SX2(y, d_max); + + XT_SSX2IP( y, out, 2*sizeof(FLOAT32)); + } + } + else + { + ae_valign inp_a, min_a, max_a, out_a; + + inp_a = XT_LASX2PP(inp); + min_a = XT_LASX2PP(min); + max_a = XT_LASX2PP(max); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp_a, inp); + XT_LASX2IP(d_min, min_a, min); + XT_LASX2IP(d_max, max_a, max); + + y = XT_MAX_SX2(x1, d_min); + y = XT_MIN_SX2(y, d_max); + + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a3, a; + XT_LSIP(a1, (xtfloat *)inp, 0); + XT_LSIP(a2, (xtfloat *)min, 0); + XT_LSIP(a3, (xtfloat *)max, 0); + a = XT_MAX_S(a1, a2); + a = XT_MIN_S(a, a3); + XT_SSI(a, (xtfloat *)out, 0); + } +} + +static void internal_elm_clamp_broadcast_2D_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_min, + const FLOAT32 * __restrict__ p_max, + const FLOAT32 * __restrict__ p_inp, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_min; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_max; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + FLOAT32 * input = p_inp; + + int 
num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 d_inp, x1, x2, y; + xtfloat in0, a0, b0, c0; + unsigned char con1, con2; + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_min[i * in_lc]; + p_b = (xtfloatx2 *)p_max; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + input = (xtfloatx2 *)&p_inp[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + XT_LSX2IP(d_inp, input, 2 * sizeof(FLOAT32)); + y = XT_MAX_SX2(d_inp, x2); + y = XT_MIN_SX2(y, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp, vmin, vmax, out_a = AE_ZALIGN64(); + vmin = XT_LASX2PP(p_a); + vmax = XT_LASX2PP(p_b); + vinp = XT_LASX2PP(input); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vmin, p_a); + XT_LASX2IP(x2, vmax, p_b); + XT_LASX2IP(d_inp, vinp, input); + y = XT_MAX_SX2(d_inp, x2); + y = XT_MIN_SX2(y, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + XT_LSIP(in0, (xtfloat *)input, 0); + c0 = XT_MAX_S(in0, b0); + c0 = XT_MIN_S(a0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_min[i * in_lc]; + p_b = (xtfloatx2 *)p_max; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + input = (xtfloatx2 *)&p_inp[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + XT_LSX2IP(d_inp, input, 2 * sizeof(FLOAT32)); + y = XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp, vmin, vmax, out_a = AE_ZALIGN64(); + vmin = XT_LASX2PP(p_a); + vmax = XT_LASX2PP(p_b); + vinp = XT_LASX2PP(input); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vmin, p_a); + XT_LASX2IP(x2, vmax, p_b); + XT_LASX2IP(d_inp, vinp, input); + y = XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + XT_LSIP(in0, (xtfloat *)input, 0); + c0 = XT_MAX_S(in0, a0); + c0 = XT_MIN_S(c0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} +static void internal_elm_clamp_broadcast_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_min, + const FLOAT32 * __restrict__ p_max, + const unsigned char * __restrict__ p_inp, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_min; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_max; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + xtfloatx2 *__restrict__ input = (xtfloatx2 *)p_inp; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out, in0; + xtfloatx2 d_inp, x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + +/* Min pointer is pointing to actual max and max to min */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && 
((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(i=0; i p_max_shape[i] ? p_min_shape[i] : p_max_shape[i]))) + { + return -1; + } + } + + WORD32 min_strides[4], max_strides[4]; + min_strides[3] = 1; + max_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(min_strides[i + 1], max_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_min_shape[i + 1], p_max_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + min_strides[i] = AE_MOVAD32_H(d_str); + max_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int min_const = 1, max_const = 1; + for(i = 0; i < 4; i++) + { + if(p_min_shape[i] != p_max_shape[i]) + { + if(p_min_shape[i] == 1) + min_strides[i] = 0; + else + max_strides[i] = 0; + + need_broadcast = 1; + } + if(p_min_shape[i] != 1) + min_const &= 0; + if(p_max_shape[i] != 1) + max_const &= 0; + } + + int itr0, itr1, itr2; + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict p_inp_temp = p_inp; + const FLOAT32 *__restrict__ p_min_tmp = p_min; + const FLOAT32 *__restrict__ p_max_tmp = p_max; + + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_clamp_broadcast_2D_f32xf32xf32_f32( + p_out, + p_min, + p_max, + p_inp, + 1, + p_out_shape[0] * min_strides[0], + sign_flag); + } + else if(min_strides[3] == max_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(min_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = min_strides[0]; + tmp_strides[1] = min_strides[1]; + + min_strides[0] = max_strides[0]; + min_strides[1] = max_strides[1]; + + max_strides[0] = tmp_strides[0]; + max_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(max_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_clamp_broadcast_2D_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp0, + p_max_tmp0, + p_inp_temp, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + p_inp_temp += in_lc * out_lc; + } + + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + else if(min_const == 1 || max_const == 1) + { + sign_flag = 0; + if(min_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + } + internal_elm_clamp_broadcast_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp, + p_max_tmp, + p_inp_temp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(min_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = min_strides[0]; + tmp_strides[1] = min_strides[1]; + tmp_strides[2] = min_strides[2]; + + min_strides[0] = max_strides[0]; + min_strides[1] = max_strides[1]; + min_strides[2] = max_strides[2]; + + max_strides[0] = tmp_strides[0]; + max_strides[1] = tmp_strides[1]; + max_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 
0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_min_tmp1 = p_min_tmp0; + const FLOAT32 *__restrict__ p_max_tmp1 = p_max_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_clamp_broadcast_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp1, + p_max_tmp1, + p_inp_temp, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_min_tmp1 += min_strides[2]; + p_max_tmp1 += max_strides[2]; + p_inp_temp += p_out_shape[3]; + } + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + } + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + return 0; +} +#endif diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c new file mode 100644 index 0000000000..404c8562d9 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c @@ -0,0 +1,847 @@ +/******************************************************************************* +* Copyright (c) 2018-2024 Cadence Design Systems, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +******************************************************************************/ +#include "nnlib-hifi4/xa_nnlib/include/xa_type_def.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_common_fpu.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nn_common.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_err_chk.h" +#include "nnlib-hifi4/xa_nnlib/algo/kernels/basic/hifi4/xa_nn_basic_state.h" +#include "nnlib-hifi4/xa_nnlib/include/nnlib/xa_nnlib_kernels_api.h" + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_maximum_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_maximum_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm) +{ + + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + y = XT_MAX_SX2(x2, x1); + XT_SSX2IP( y, out, 2*sizeof(FLOAT32)); + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_MAX_SX2(x2, x1); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_MAX_S(a1, a2); + XT_SSI(a, (xtfloat *)out, 0); + } + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_maximum_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_MAX_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } 
+ } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_MAX_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_MAX_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } +} + +static void internal_elm_maximum_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_maximum_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_maximum_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 
1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_maximum_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_maximum_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_minimum_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_minimum_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm) +{ + + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + y = XT_MIN_SX2(x2, x1); + XT_SSX2IP( y, out, 2*sizeof(FLOAT32)); + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_MIN_SX2(x2, x1); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_MIN_S(a1, a2); + XT_SSI(a, (xtfloat *)out, 0); + } + 
return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_minimum_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_MIN_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_MIN_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_MIN_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } +} + +static void internal_elm_minimum_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? 
p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_minimum_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_minimum_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_minimum_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ 
p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_minimum_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif \ No newline at end of file
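
Note on the dispatch pattern shared by op_clamp.cpp, op_maximum.cpp, and op_minimum.cpp above: each operator takes the nnlib fast path only when every tensor dtype is Float and no tensor exceeds NNLIB_MAX_DIM (4) dimensions; before calling the xa_nn_elm_*_broadcast_4D_* kernels it right-aligns each tensor's sizes into a 4-element shape array padded with leading 1s. The sketch below restates that shape normalization in isolation; pad_shape_to_4d is a hypothetical helper name introduced here only for illustration and is not part of this diff.

/* Illustrative sketch only (not part of the diff): the shape padding applied
 * before the 4D broadcast kernels are called, e.g. sizes {8, 16} -> {1, 1, 8, 16}. */
#define NNLIB_MAX_DIM 4

static void pad_shape_to_4d(int padded[NNLIB_MAX_DIM], const int* sizes, int dim) {
  for (int i = 0; i < NNLIB_MAX_DIM; i++)
    padded[i] = 1;                 /* leading dimensions default to 1 */
  int off = NNLIB_MAX_DIM - dim;   /* right-align the real sizes */
  for (int i = 0; i < dim; i++)
    padded[i + off] = sizes[i];
}

With out_shape, inp1_shape, and inp2_shape prepared this way, op_maximum.cpp calls xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); when the input and output shapes already match it uses the flat xa_nn_elm_maximum_f32xf32_f32 variant instead, and any non-float dtype or rank above 4 falls back to the portable ExecuTorch kernel path.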