diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index b6a2c50001..5c53f7e7ae 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -20,7 +20,12 @@ - op: _softmax.out kernels: - arg_meta: null - kernel_name: torch::executor::softmax_out + kernel_name: cadence::impl::HiFi::softmax_out + +- op: atan2.out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::atan2_out - op: add.out kernels: @@ -35,7 +40,12 @@ - op: cat.out kernels: - arg_meta: null - kernel_name: torch::executor::cat_out + kernel_name: cadence::impl::HiFi::cat_out + +- op: clamp.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::clamp_tensor_out - op: clone.out kernels: @@ -60,7 +70,12 @@ - op: full.out kernels: - arg_meta: null - kernel_name: torch::executor::full_out + kernel_name: cadence::impl::HiFi::full_out + +- op: gt.Scalar_out + kernels: + - arg_meta: null + kernel_name: torch::executor::gt_scalar_out - op: gelu.out kernels: @@ -85,7 +100,7 @@ - op: mean.out kernels: - arg_meta: null - kernel_name: cadence::impl::HiFi::mean_dim_out + kernel_name: cadence::impl::HiFi::mean_dim_out - op: minimum.out kernels: @@ -100,7 +115,7 @@ - op: permute_copy.out kernels: - arg_meta: null - kernel_name: torch::executor::permute_copy_out + kernel_name: cadence::impl::HiFi::permute_copy_out - op: pow.Scalar_out kernels: @@ -117,6 +132,11 @@ - arg_meta: null kernel_name: cadence::impl::HiFi::pow_Tensor_Tensor_out +- op: remainder.Tensor_out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::remainder_Tensor_out + - op: rsqrt.out kernels: - arg_meta: null @@ -170,7 +190,6 @@ - arg_meta: null kernel_name: cadence::impl::HiFi::dequantize_per_tensor_out - - func: cadence::quantized_layer_norm.out(Tensor input, Tensor in_scale, Tensor in_zero_point, int[] normalized_shape, Tensor weight, Tensor bias, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) kernels: - arg_meta: null @@ -184,6 +203,12 @@ kernels: - arg_meta: null kernel_name: cadence::impl::HiFi::quantized_linear_out + +- func: cadence::quantized_relu.out(Tensor X, Tensor X_zero_point, int out_zero_point, Tensor out_multiplier, Tensor out_shift, *, Tensor(a!) out) -> Tensor(a!) + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::quantized_relu_out + - func: cadence::quantized_linear.per_tensor_out(Tensor src, Tensor weight, Tensor bias, SymInt src_zero_point, SymInt weight_zero_point, SymInt out_multiplier, SymInt out_shift, SymInt out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!) 
kernels: - arg_meta: null diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 3d321443f8..9bbd386c75 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -10,14 +10,19 @@ add_library( kernels.cpp ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_atan2_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_clamp_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_pow_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c ) # Let files say "include ". set(_common_include_directories ${EXECUTORCH_ROOT}/..) diff --git a/backends/cadence/hifi/kernels/kernels.cpp b/backends/cadence/hifi/kernels/kernels.cpp index 1b335c846b..bf4a2d143f 100644 --- a/backends/cadence/hifi/kernels/kernels.cpp +++ b/backends/cadence/hifi/kernels/kernels.cpp @@ -20,6 +20,11 @@ memcpy(void* dst, const void* src, size_t num_bytes) { MEMCPY_8b(dst, src, num_bytes); } +void* allocate_temp_memory(KernelRuntimeContext& ctx, size_t size) { + Result temp_mem_res = ctx.allocate_temp(size); + return temp_mem_res.ok() ? 
temp_mem_res.get() : nullptr; +} + // Quantize a fp32 value to an int8_t/uint8_t value template __attribute__((always_inline)) T diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index 10927adc2a..c5795a617a 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -14,8 +14,12 @@ /* For NNLIB APIs */ #include "xa_nnlib_kernels_api.h" -/* Potential NNLIB function/APIs */ +#include + +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::Result; +/* Potential NNLIB function/APIs */ extern "C" WORD32 xa_nn_broadcast_32_32( WORD32* __restrict__ p_out, const int* const out_shape, @@ -23,6 +27,16 @@ extern "C" WORD32 xa_nn_broadcast_32_32( const int* const in_shape, int num_dims); +extern "C" WORD32 xa_nn_concat_32_32( + WORD32* __restrict__ p_out, + const WORD32* const p_out_shape, + const WORD32** pp_inps, + const WORD32* const* pp_inps_shape, + WORD32 num_out_dims, + WORD32 num_inp, + WORD32 num_inp_dims, + WORD32 axis); + extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32( FLOAT32* __restrict__ p_out, const WORD32* const p_out_shape, @@ -31,6 +45,26 @@ extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32( const FLOAT32* __restrict__ p_inp2, const WORD32* const p_inp2_shape); +extern "C" void +xa_nn_elm_atan2_f32(FLOAT32* z, const FLOAT32* y, const FLOAT32* x, WORD32 N); + +extern "C" WORD32 xa_nn_elm_clamp_f32xf32xf32_f32( + FLOAT32* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp, + const FLOAT32* __restrict__ p_min, + const FLOAT32* __restrict__ p_max, + WORD32 num_elm); + +extern "C" WORD32 xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp, + const WORD32* const p_inp_shape, + const FLOAT32* __restrict__ p_min, + const WORD32* const p_min_shape, + const FLOAT32* __restrict__ p_max, + const WORD32* const p_max_shape); + extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( FLOAT32* __restrict__ p_out, const WORD32* const p_out_shape, @@ -97,6 +131,20 @@ extern "C" void xa_nn_elm_pow_f32( const FLOAT32* restrict y, WORD32 N); +extern "C" WORD32 xa_nn_elm_remainder_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp1, + const FLOAT32* __restrict__ p_inp2, + WORD32 num_elm); + +extern "C" WORD32 xa_nn_elm_remainder_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape); + extern "C" WORD32 xa_nn_elm_where_f32xf32_f32( FLOAT32* __restrict__ p_out, const FLOAT32* __restrict__ p_inp1, @@ -125,11 +173,22 @@ extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32( WORD32 num_axis_dims, void* __restrict__ p_scratch_in); +extern "C" WORD32 xa_nn_transpose_32_32( + WORD32* __restrict__ p_out, + const WORD32* const p_out_shape, + const WORD32* __restrict__ p_inp, + const WORD32* const p_inp_shape, + const WORD32* __restrict__ p_permute_vec, + WORD32 num_out_dims, + WORD32 num_inp_dims); + namespace cadence { namespace impl { namespace HiFi { namespace kernels { +void* allocate_temp_memory(KernelRuntimeContext& ctx, size_t size); + void memcpy(void* dst, const void* src, size_t num_bytes); WORD32 matmul_asym8uxasym8u_asym8u( diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 5e51f7fd3b..ab5a04897e 100644 --- 
a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -21,28 +21,34 @@ endif() # ATen compliant ops that are needed to run this model. set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_atan2.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_cat.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_clamp.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_full.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_maximum.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_minimum.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_permute_copy.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_pow.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_remainder.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_rsqrt.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_softmax.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_where.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_gt.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_gelu.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_hardtanh.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_max_pool2d_with_indices.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp" @@ -74,7 +80,7 @@ target_include_directories( # Custom ops that are needed to run the test model. add_library( custom_ops "quantized_linear_out.cpp" "quantized_layer_norm.cpp" - "quantize_per_tensor.cpp" "dequantize_per_tensor.cpp" + "quantize_per_tensor.cpp" "quantized_relu_out.cpp" "dequantize_per_tensor.cpp" ) target_include_directories( custom_ops PUBLIC ${ROOT_DIR}/.. ${CMAKE_BINARY_DIR} diff --git a/backends/cadence/hifi/operators/op_atan2.cpp b/backends/cadence/hifi/operators/op_atan2.cpp new file mode 100644 index 0000000000..db2fc23be1 --- /dev/null +++ b/backends/cadence/hifi/operators/op_atan2.cpp @@ -0,0 +1,201 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
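For context on the scratch-buffer change above (the allocate_temp_memory helper added to kernels.cpp/kernels.h, which the operators below use instead of malloc), here is a minimal standalone sketch of the pattern. FakeTempContext and its fixed arena are stand-ins invented for illustration, not the ExecuTorch KernelRuntimeContext; the real helper unwraps the result of ctx.allocate_temp() and returns nullptr on failure, which the stand-in approximates by returning nullptr when the arena is exhausted.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for a kernel runtime context that owns a fixed temp arena
// (illustrative only; the real context type lives in the ExecuTorch runtime).
struct FakeTempContext {
  uint8_t arena[1024];
  size_t used = 0;
  void* allocate_temp(size_t size) {
    if (used + size > sizeof(arena)) return nullptr;  // allocation failed
    void* p = arena + used;
    used += size;
    return p;
  }
};

// Mirrors the shape of the helper added in kernels.cpp: ask the context for
// scratch space and fall back to nullptr instead of calling malloc.
void* allocate_temp_memory(FakeTempContext& ctx, size_t size) {
  return ctx.allocate_temp(size);
}

int main() {
  FakeTempContext ctx;
  // An operator would request a scratch buffer sized for the broadcasted output.
  float* scratch =
      static_cast<float*>(allocate_temp_memory(ctx, 16 * sizeof(float)));
  std::printf("scratch %s\n", scratch ? "allocated" : "unavailable");
  return 0;
}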
+ */ + +#include +#include +#include +#include + +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& atan2_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType out_type = out.scalar_type(); + + constexpr auto name = "atan2.out"; + constexpr int kNnlibMaxDim = 16; + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = true; + + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted && b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + + if (out_type != ScalarType::Float) + optimized = false; + + if (max_dim > kNnlibMaxDim) + optimized = false; + + WORD32 num_elm = out.numel(); + + if (optimized) { + if (broadcast) { + WORD32* __restrict__ ptr1 = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(WORD32)); + WORD32* __restrict__ ptr2 = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(WORD32)); + + WORD32* __restrict__ pin1 = + (WORD32* __restrict__)a.const_data_ptr(); + WORD32* __restrict__ pin2 = + (WORD32* __restrict__)b.const_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + for (int i = 0; i < b_dim; i++) + p_inp2_shape[i] = b.size(i); + + WORD32 ret_val = + xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + ret_val = + xa_nn_broadcast_32_32(ptr2, p_out_shape, pin2, p_inp2_shape, out_dim); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + FLOAT32* __restrict__ p_out = + (FLOAT32* __restrict__)out.mutable_data_ptr(); + const FLOAT32* __restrict__ p_inp1 = (const FLOAT32* __restrict__)ptr1; + const FLOAT32* __restrict__ p_inp2 = (const FLOAT32* __restrict__)ptr2; + + xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm); + + free(ptr1); + free(ptr2); + } else if (a_is_broadcasted && (!b_is_broadcasted)) { + FLOAT32* __restrict__ ptr1 = + (FLOAT32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(WORD32)); + + FLOAT32* __restrict__ pin1 = + (FLOAT32* __restrict__)a.const_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < a_dim; i++) + p_inp1_shape[i] = a.size(i); + + WORD32 ret_val = xa_nn_broadcast_32_32( + (WORD32*)ptr1, p_out_shape, (WORD32*)pin1, p_inp1_shape, out_dim); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + FLOAT32* __restrict__ p_out = + (FLOAT32* __restrict__)out.mutable_data_ptr(); + const 
FLOAT32* __restrict__ p_inp1 = (const FLOAT32* __restrict__)ptr1; + const FLOAT32* __restrict__ p_inp2 = + (const FLOAT32* __restrict__)b.const_data_ptr(); + + xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm); + + free(ptr1); + } else if (b_is_broadcasted && (!a_is_broadcasted)) { + WORD32* __restrict__ ptr1 = + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(WORD32)); + + WORD32* __restrict__ pin1 = + (WORD32* __restrict__)b.const_data_ptr(); + + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + + for (int i = 0; i < out_dim; i++) + p_out_shape[i] = out.size(i); + for (int i = 0; i < b_dim; i++) + p_inp1_shape[i] = b.size(i); + + xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim); + + FLOAT32* __restrict__ p_out = + (FLOAT32* __restrict__)out.mutable_data_ptr(); + const FLOAT32* __restrict__ p_inp1 = + (const FLOAT32* __restrict__)a.const_data_ptr(); + const FLOAT32* __restrict__ p_inp2 = (const FLOAT32* __restrict__)ptr1; + + xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm); + + free(ptr1); + } else { + FLOAT32* __restrict__ p_out = + (FLOAT32* __restrict__)out.mutable_data_ptr(); + const FLOAT32* __restrict__ p_inp1 = + (const FLOAT32* __restrict__)a.const_data_ptr(); + const FLOAT32* __restrict__ p_inp2 = + (const FLOAT32* __restrict__)b.const_data_ptr(); + + xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm); + } + return out; + } + + ET_SWITCH_REALHB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REALHB_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_FLOATH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + torch::executor:: + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_OUT casted_a = static_cast(val_a); + CTYPE_OUT casted_b = static_cast(val_b); + return static_cast(std::atan2(casted_a, casted_b)); + }, + a, + b, + out); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_cat.cpp b/backends/cadence/hifi/operators/op_cat.cpp new file mode 100644 index 0000000000..1a62892445 --- /dev/null +++ b/backends/cadence/hifi/operators/op_cat.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
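A minimal sketch of the math behind the optimized atan2 path above: any broadcasted operand is first expanded to the full output shape (via xa_nn_broadcast_32_32), after which the vector kernel operates on flat, equal-length buffers. The atan2_flat helper below is illustrative, not the NNLIB kernel itself; it shows the per-element result the fast path produces.

#include <cmath>
#include <cstdio>
#include <vector>

// Reference for the flat element-wise step: both inputs have already been
// expanded (broadcast) to the output's element count, as atan2_out does
// before calling the vector kernel.
void atan2_flat(float* out, const float* y, const float* x, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    out[i] = std::atan2(y[i], x[i]);
  }
}

int main() {
  std::vector<float> y = {0.f, 1.f, 1.f, -1.f};
  std::vector<float> x = {1.f, 0.f, 1.f, -1.f};
  std::vector<float> out(y.size());
  atan2_flat(out.data(), y.data(), x.data(), out.size());
  for (float v : out) std::printf("%f\n", v);
  return 0;
}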
+ */ + +#include +#include +#include + +#include + +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::getLeadingDims; +using executorch::runtime::getTrailingDims; +using executorch::runtime::resize_tensor; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::check_cat_args; +using torch::executor::Error; +using torch::executor::get_cat_out_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& cat_out( + RuntimeContext& ctx, + exec_aten::ArrayRef tensors, + int64_t dim, + Tensor& out) { + constexpr auto name = "cat.out"; + constexpr int kNnlibMaxDim = 16; + + bool optimized = true; + + if (out.scalar_type() != ScalarType::Float) + optimized = false; + + if (optimized) { + WORD32 num_inp = tensors.size(); + WORD32 num_inp_dims = out.dim(); + WORD32 num_out_dims = num_inp_dims; + WORD32 axis = dim; + + WORD32 inp_shape[kNnlibMaxDim][kNnlibMaxDim]; + WORD32 p_out_shape[kNnlibMaxDim]; + + WORD32* ptr_shape[kNnlibMaxDim]; + const WORD32* ptr[kNnlibMaxDim]; + + int k = 0; + for (int i = 0; i < num_inp; i++) { + if (tensors[i].numel() == 0) + continue; + ptr[k] = (const WORD32*)tensors[i].const_data_ptr(); + for (int j = 0; j < num_inp_dims; j++) { + inp_shape[k][j] = tensors[i].size(j); + } + ptr_shape[k] = inp_shape[k]; + k++; + } + + num_inp = k; + + for (int i = 0; i < num_out_dims; i++) { + p_out_shape[i] = out.size(i); + } + + const WORD32** pp_inps = &ptr[0]; + + WORD32* p_out = (WORD32*)out.mutable_data_ptr(); + + const WORD32* const* pp_inps_shape = (const WORD32* const*)&ptr_shape[0]; + + WORD32 ret_val = xa_nn_concat_32_32( + p_out, + p_out_shape, + pp_inps, + pp_inps_shape, + num_out_dims, + num_inp, + num_inp_dims, + axis); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + return out; + } + + if (dim < 0) { + dim += out.dim(); + } + + ET_KERNEL_CHECK(ctx, check_cat_args(tensors, dim, out), Internal, out); + + Tensor::SizesType + expected_out_size[executorch::runtime::kTensorDimensionLimit]; + size_t expected_out_dim = 0; + get_cat_out_target_size(tensors, dim, expected_out_size, &expected_out_dim); + + ET_KERNEL_CHECK( + ctx, + resize_tensor(out, {expected_out_size, expected_out_dim}) == Error::Ok, + InvalidArgument, + out); + + // Special handling when all inputs are 1D-empty tensors for aten consistency + // In that case, just return an 1D-empty tensor without checking dim + bool all_1d_empty = true; + for (size_t i = 0; i < tensors.size(); ++i) { + if (tensors[i].numel() != 0 || tensors[i].dim() != 1) { + all_1d_empty = false; + break; + } + } + if (all_1d_empty) { + return out; + } + + const size_t outer = getLeadingDims(out, dim); + const size_t dim_stride = getTrailingDims(out, dim); + const size_t ninputs = tensors.size(); + + const auto out_type = out.scalar_type(); + ET_SWITCH_REALHB_TYPES(out_type, ctx, name, CTYPE_OUT, [&] { + CTYPE_OUT* out_ptr = out.mutable_data_ptr(); + for (size_t i = 0; i < outer; ++i) { + for (size_t j = 0; j < ninputs; ++j) { + const auto in_type = tensors[j].scalar_type(); + ET_SWITCH_REALHB_TYPES(in_type, ctx, name, CTYPE_IN, [&] { + if (tensors[j].numel() == 0) { + return; + } + size_t inner = tensors[j].size(dim) * dim_stride; + const CTYPE_IN* const in_ptr = + tensors[j].const_data_ptr() + i * inner; + + for (size_t k = 0; k < inner; ++k) { + out_ptr[k] = static_cast(in_ptr[k]); + } + out_ptr += inner; + }); + } + } + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} 
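A reference sketch (not the NNLIB implementation) of the outer-size/inner-size decomposition that both cat_out and xa_nn_concat_32_32 rely on: for concatenation along `axis`, each input contributes size_along_axis * inner_size contiguous elements per outer iteration, written at an offset that advances by that input's slice width.

#include <cstdio>
#include <vector>

// Reference concat of equal-rank inputs along `axis`.
std::vector<float> concat_axis(
    const std::vector<std::vector<float>>& inputs,
    const std::vector<std::vector<int>>& shapes,
    int axis) {
  const auto& s0 = shapes[0];
  int outer = 1, inner = 1, concat = 0;
  for (int d = 0; d < axis; ++d) outer *= s0[d];
  for (size_t d = axis + 1; d < s0.size(); ++d) inner *= s0[d];
  for (const auto& s : shapes) concat += s[axis];

  std::vector<float> out(static_cast<size_t>(outer) * concat * inner);
  float* out_base = out.data();
  for (size_t i = 0; i < inputs.size(); ++i) {
    const int copy = shapes[i][axis] * inner;
    const float* in = inputs[i].data();
    float* dst = out_base;
    for (int o = 0; o < outer; ++o) {
      for (int k = 0; k < copy; ++k) dst[k] = *in++;
      dst += concat * inner;  // jump to this input's slot in the next outer row
    }
    out_base += copy;  // next input starts after this input's slice
  }
  return out;
}

int main() {
  // Two 2x2 matrices concatenated along axis 1 -> 2x4.
  std::vector<std::vector<float>> inputs = {{1, 2, 3, 4}, {5, 6, 7, 8}};
  std::vector<std::vector<int>> shapes = {{2, 2}, {2, 2}};
  auto out = concat_axis(inputs, shapes, /*axis=*/1);
  for (float v : out) std::printf("%g ", v);  // 1 2 5 6 3 4 7 8
  std::printf("\n");
  return 0;
}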
// namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_clamp.cpp b/backends/cadence/hifi/operators/op_clamp.cpp new file mode 100644 index 0000000000..290c4d087d --- /dev/null +++ b/backends/cadence/hifi/operators/op_clamp.cpp @@ -0,0 +1,445 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +using Scalar = exec_aten::Scalar; +using ScalarType = exec_aten::ScalarType; +using Tensor = exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::canCast; +using executorch::runtime::isFloatingType; +using executorch::runtime::isIntegralType; +using executorch::runtime::promoteTypes; +using torch::executor::apply_ternary_elementwise_fn; +using torch::executor::Error; +using torch::executor::resize_to_broadcast_target_size; +using torch::executor::native::utils::extract_scalar; +using torch::executor::native::utils::get_scalar_dtype; +using torch::executor::native::utils::max_override; +using torch::executor::native::utils::min_override; +using torch::executor::native::utils::promote_type_with_scalar; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +namespace { + +template +/** Check if val, when cast to CTYPE_CAST, is not in the range of CTYPE_OUT */ +bool is_out_of_bounds(CTYPE_VAL val) { + const CTYPE_CAST val_cast = static_cast(val); + return val_cast < std::numeric_limits::lowest() || + val_cast > std::numeric_limits::max(); +} + +__ET_NODISCARD bool check_bounds( + const Scalar& val_scalar, + const ScalarType& val_type, + const ScalarType& out_type, + const char* val_name) { + auto is_valid = true; + + ET_SWITCH_SCALAR_OBJ_TYPES(val_type, ctx, "clamp.out", CTYPE_VAL, [&]() { + CTYPE_VAL val = 0; + extract_scalar(val_scalar, &val); + if (isIntegralType(out_type, /*includeBool=*/false)) { + ET_SWITCH_INT_TYPES(out_type, ctx, "clamp.out", CTYPE_OUT, [&]() { + if (is_out_of_bounds(val)) { + ET_LOG(Error, "%s value out of bounds", val_name); + is_valid = false; + } + }); + } else if (isFloatingType(out_type)) { + ET_SWITCH_FLOATH_TYPES(out_type, ctx, "clamp", CTYPE_OUT, [&]() { + if (std::isfinite(val) && + is_out_of_bounds(val)) { + ET_LOG(Error, "%s value out of bounds", val_name); + is_valid = false; + } + }); + } + }); + + return is_valid; +} + +} // namespace + +Tensor& clamp_out( + RuntimeContext& ctx, + const Tensor& in, + const exec_aten::optional& min_opt, + const exec_aten::optional& max_opt, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, in.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType in_type = in.scalar_type(); + ScalarType min_type = in_type; + ScalarType max_type = in_type; + ScalarType common_type = in_type; + ScalarType out_type = out.scalar_type(); + + bool has_min = min_opt.has_value(); + if (has_min) { + min_type = get_scalar_dtype(min_opt.value()); + common_type = promote_type_with_scalar(common_type, min_opt.value()); + ET_KERNEL_CHECK( + ctx, + check_bounds(min_opt.value(), min_type, out_type, "minimum"), + InvalidArgument, + out); + } + bool has_max = max_opt.has_value(); + if (has_max) { + max_type = get_scalar_dtype(max_opt.value()); + common_type = 
promote_type_with_scalar(common_type, max_opt.value()); + ET_KERNEL_CHECK( + ctx, + check_bounds(max_opt.value(), max_type, out_type, "maximum"), + InvalidArgument, + out); + } + + ET_KERNEL_CHECK_MSG( + ctx, + has_min || has_max, + InvalidArgument, + out, + "At least one of 'min' or 'max' must not be None"); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + ET_SWITCH_REALH_TYPES(out_type, ctx, "clamp", CTYPE_OUT, [&]() { + // Extract optional min value + CTYPE_OUT min = 0; + if (has_min) { + ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "clamp", CTYPE_MIN, [&]() { + CTYPE_MIN min_val = 0; + extract_scalar(min_opt.value(), &min_val); + min = static_cast(min_val); + }); + } + + // Extract optional max value + CTYPE_OUT max = 0; + if (has_max) { + ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "clamp", CTYPE_MAX, [&]() { + CTYPE_MAX max_val = 0; + extract_scalar(max_opt.value(), &max_val); + max = static_cast(max_val); + }); + } + + ET_SWITCH_REALHB_TYPES(in_type, ctx, "clamp", CTYPE_IN, [&]() { + torch::executor::apply_unary_map_fn( + [has_min, min, has_max, max](const CTYPE_IN val_in) { + CTYPE_OUT val_out = static_cast(val_in); + if (has_min) { + val_out = max_override(val_out, min); + } + if (has_max) { + val_out = min_override(val_out, max); + } + return val_out; + }, + in.const_data_ptr(), + out.mutable_data_ptr(), + in.numel()); + }); + }); + + return out; +} + +Tensor& clamp_tensor_out( + RuntimeContext& ctx, + const Tensor& in, + const exec_aten::optional& min_opt, + const exec_aten::optional& max_opt, + Tensor& out) { + (void)ctx; + + bool has_min = min_opt.has_value(); + bool has_max = max_opt.has_value(); + + ET_KERNEL_CHECK_MSG( + ctx, + has_min || has_max, + InvalidArgument, + out, + "At least one of 'min' or 'max' must not be None"); + + const Tensor& min = has_min ? min_opt.value() : in; + const Tensor& max = has_max ? max_opt.value() : in; + + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(in, min, max, out) == Error::Ok, + InvalidArgument, + out); + + constexpr int kNnlibMaxDim = + 4; /*fallback to not optimised if broadcast and dim > 4 */ + + ScalarType in_type = in.scalar_type(); + ScalarType min_type = min.scalar_type(); + ScalarType max_type = max.scalar_type(); + ScalarType common_type = in_type; + ScalarType out_type = out.scalar_type(); + + if (has_min) { + common_type = promoteTypes(common_type, min_type, /*half_to_float*/ true); + } + if (has_max) { + common_type = promoteTypes(common_type, max_type, /*half_to_float*/ true); + } + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + bool in_is_broadcasted = !out.sizes().equals(in.sizes()); + bool min_is_broadcasted = !out.sizes().equals(min.sizes()); + bool max_is_broadcasted = !out.sizes().equals(max.sizes()); + bool broadcast = + (in_is_broadcasted || min_is_broadcasted || max_is_broadcasted); + + int max_dim = in.dim() > min.dim() ? in.dim() : min.dim(); + max_dim = max.dim() > max_dim ? max.dim() : max_dim; + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + bool optimized = true; + bool fall_back = false; + if ((in_type != ScalarType::Float) || (min_type != ScalarType::Float) || + (max_type != ScalarType::Float)) + optimized = false; + if ((broadcast == true) && (max_dim > kNnlibMaxDim)) + optimized = false; + + if (optimized) { + if (!has_min) { + const float* const max_data = max.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp_shape[kNnlibMaxDim]; + int max_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp_shape[i] = 1; + max_shape[i] = 1; + } + + int max_dim = max.dim(), inp_dim = in.dim(), out_dim = out.dim(); + int off_o = kNnlibMaxDim - out_dim; + int off_max = kNnlibMaxDim - max_dim; + int off_inp = kNnlibMaxDim - inp_dim; + for (int i = 0; i < out_dim; i++) { + out_shape[i + off_o] = out.size(i); + } + for (int i = 0; i < max_dim; i++) { + max_shape[i + off_max] = max.size(i); + } + for (int i = 0; i < inp_dim; i++) { + inp_shape[i + off_inp] = in.size(i); + } + + WORD32 ret_val = xa_nn_elm_minimum_broadcast_4D_f32xf32_f32( + out_data, out_shape, inp_data, inp_shape, max_data, max_shape); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + } else { + WORD32 ret_val = xa_nn_elm_minimum_f32xf32_f32( + out_data, inp_data, max_data, out.numel()); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + } else if (!has_max) { + const float* const min_data = min.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp_shape[kNnlibMaxDim]; + int min_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp_shape[i] = 1; + min_shape[i] = 1; + } + + int min_dim = min.dim(), max_dim = max.dim(), inp_dim = in.dim(), + out_dim = out.dim(); + int off_o = kNnlibMaxDim - out_dim; + int off_min = kNnlibMaxDim - min_dim; + int off_inp = kNnlibMaxDim - inp_dim; + for (int i = 0; i < out_dim; i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < min_dim; i++) + min_shape[i + off_min] = min.size(i); + for (int i = 0; i < inp_dim; i++) + inp_shape[i + off_inp] = in.size(i); + WORD32 ret_val = xa_nn_elm_maximum_broadcast_4D_f32xf32_f32( + out_data, out_shape, inp_data, inp_shape, min_data, min_shape); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + } else { + WORD32 ret_val = xa_nn_elm_maximum_f32xf32_f32( + out_data, inp_data, min_data, out.numel()); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + } else { + const float* const min_data = min.const_data_ptr(); + const float* const max_data = max.const_data_ptr(); + const float* const inp_data = in.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp_shape[kNnlibMaxDim]; + int min_shape[kNnlibMaxDim]; + int max_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp_shape[i] = 1; + min_shape[i] = 1; + max_shape[i] = 1; + } + + int min_dim = min.dim(), max_dim = max.dim(), inp_dim = in.dim(), + out_dim = out.dim(); + int off_o = kNnlibMaxDim - out_dim; + int off_min = kNnlibMaxDim - min_dim; + int off_max = kNnlibMaxDim - max_dim; + int off_inp = kNnlibMaxDim - inp_dim; + for (int i = 0; i < out_dim; i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < min_dim; i++) + min_shape[i + 
off_min] = min.size(i); + + for (int i = 0; i < max_dim; i++) + max_shape[i + off_max] = max.size(i); + + for (int i = 0; i < inp_dim; i++) + inp_shape[i + off_inp] = in.size(i); + + if (inp_shape[0] != out_shape[0] || inp_shape[1] != out_shape[1] || + inp_shape[2] != out_shape[2] || inp_shape[3] != out_shape[3]) { + void* p_scratch = (void*)kernels::allocate_temp_memory( + ctx, + (out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]) * + sizeof(int)); + const FLOAT32* p_brd_cond = (const FLOAT32*)p_scratch; + xa_nn_broadcast_32_32( + (WORD32*)p_brd_cond, out_shape, (WORD32*)inp_data, inp_shape, 4); + + for (int i = 0; i < 4; i++) { + inp_shape[i] = out_shape[i]; + } + + WORD32 ret_val = xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32( + out_data, + out_shape, + p_brd_cond, + inp_shape, + min_data, + min_shape, + max_data, + max_shape); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + free(p_scratch); + } else { + WORD32 ret_val = xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32( + out_data, + out_shape, + inp_data, + inp_shape, + min_data, + min_shape, + max_data, + max_shape); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + } else { + WORD32 ret_val = xa_nn_elm_clamp_f32xf32xf32_f32( + out_data, inp_data, min_data, max_data, out.numel()); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + } + return out; + } + + constexpr auto name = "clamp.Tensor_out"; + + ET_SWITCH_REALHB_TYPES(in_type, ctx, name, CTYPE_IN, [&]() { + ET_SWITCH_REALHB_TYPES(min_type, ctx, name, CTYPE_MIN, [&]() { + ET_SWITCH_REALHB_TYPES(max_type, ctx, name, CTYPE_MAX, [&]() { + ET_SWITCH_REALHB_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + apply_ternary_elementwise_fn< + CTYPE_IN, + CTYPE_MIN, + CTYPE_MAX, + CTYPE_OUT>( + [has_min, has_max]( + const CTYPE_IN val_in, + const CTYPE_MIN val_min, + const CTYPE_MAX val_max) { + CTYPE_OUT val_out = static_cast(val_in); + if (has_min) { + val_out = + max_override(val_out, static_cast(val_min)); + } + if (has_max) { + val_out = + min_override(val_out, static_cast(val_max)); + } + return val_out; + }, + in, + min, + max, + out); + }); + }); + }); + }); + return out; +} +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence diff --git a/backends/cadence/hifi/operators/op_full.cpp b/backends/cadence/hifi/operators/op_full.cpp new file mode 100644 index 0000000000..47804a64f4 --- /dev/null +++ b/backends/cadence/hifi/operators/op_full.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
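A minimal sketch of the element-wise math in clamp_tensor_out above: the lower bound is applied as a maximum and the upper bound as a minimum, and either bound may be absent, which is why the single-bound fast paths can reuse the NNLIB minimum/maximum kernels. clamp_one below is illustrative only.

#include <algorithm>
#include <cstdio>
#include <vector>

// Lower bound applied as max, upper bound applied as min; a null pointer
// stands in for an absent bound.
float clamp_one(float x, const float* min_v, const float* max_v) {
  if (min_v) x = std::max(x, *min_v);
  if (max_v) x = std::min(x, *max_v);
  return x;
}

int main() {
  std::vector<float> in = {-2.f, -0.5f, 0.25f, 3.f};
  float lo = -1.f, hi = 1.f;
  for (float v : in) std::printf("%g ", clamp_one(v, &lo, &hi));  // -1 -0.5 0.25 1
  std::printf("\n");
  return 0;
}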
+ */ + +#include +#include +#include +#include + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +using exec_aten::IntArrayRef; +using exec_aten::RuntimeContext; +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using torch::executor::Error; +using torch::executor::native::utils::extract_scalar; +using torch::executor::native::utils::get_scalar_dtype; + +Tensor& full_out( + RuntimeContext& ctx, + const IntArrayRef sizes, + const Scalar& fill_value, + Tensor& out) { + (void)ctx; + + ScalarType val_type = get_scalar_dtype(fill_value); + ScalarType out_type = out.scalar_type(); + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, sizes) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + constexpr auto name = "full.out"; + + bool optimized = false; + if (out_type == ScalarType::Long || out_type == ScalarType::Float || + out_type == ScalarType::Byte || out_type == ScalarType::Char) + optimized = true; + + if (out_type != val_type) + optimized = false; + + if (optimized) { + if (out_type == ScalarType::Long) { + int* data_out = out.mutable_data_ptr(); + int val; + extract_scalar(fill_value, &val); + for (size_t i = 0; i < out.numel(); ++i) { + data_out[i] = val; + } + } else if (out_type == ScalarType::Float) { + float* data_out = out.mutable_data_ptr(); + float val; + extract_scalar(fill_value, &val); + + WORD32 ret_val = xa_nn_memset_f32_f32(data_out, val, out.numel()); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + } else if (out_type == ScalarType::Byte || out_type == ScalarType::Char) { + char* data_out = out.mutable_data_ptr(); + int val; + extract_scalar(fill_value, &val); + memset((void*)data_out, val, out.numel()); + } + return out; + } + + ET_SWITCH_SCALAR_OBJ_TYPES(val_type, ctx, name, CTYPE_VAL, [&] { + CTYPE_VAL val; + extract_scalar(fill_value, &val); + + ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, name, CTYPE_OUT, [&] { + CTYPE_OUT val_casted = static_cast(val); + auto data_out = out.mutable_data_ptr(); + for (size_t i = 0; i < out.numel(); ++i) { + data_out[i] = val_casted; + } + }); + }); + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_maximum.cpp b/backends/cadence/hifi/operators/op_maximum.cpp index f9a3658891..f85d3470e9 100644 --- a/backends/cadence/hifi/operators/op_maximum.cpp +++ b/backends/cadence/hifi/operators/op_maximum.cpp @@ -23,7 +23,6 @@ using torch::executor::apply_binary_elementwise_fn; using torch::executor::Error; using torch::executor::resize_to_broadcast_target_size; - namespace cadence { namespace impl { namespace HiFi { diff --git a/backends/cadence/hifi/operators/op_mean.cpp b/backends/cadence/hifi/operators/op_mean.cpp index 478e10da71..cdc844ec5c 100644 --- a/backends/cadence/hifi/operators/op_mean.cpp +++ b/backends/cadence/hifi/operators/op_mean.cpp @@ -125,7 +125,9 @@ Tensor& mean_dim_out( int scratch_size = xa_nn_reduce_getsize_nhwc( -3, inp_shape, num_inp_dims, p_axis, num_axis_dims, 1); - void* __restrict__ p_scratch_in = (void* __restrict__)malloc(scratch_size); + void* __restrict__ p_scratch_in = + (void* __restrict__)kernels::allocate_temp_memory( + ctx, scratch_size * sizeof(int)); xa_nn_reduce_mean_4D_f32_f32( p_out, diff --git a/backends/cadence/hifi/operators/op_permute_copy.cpp b/backends/cadence/hifi/operators/op_permute_copy.cpp new file mode 100644 index 
0000000000..bb72eaf521 --- /dev/null +++ b/backends/cadence/hifi/operators/op_permute_copy.cpp @@ -0,0 +1,198 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +using exec_aten::ScalarType; +using exec_aten::SizesType; +using exec_aten::Tensor; +using executorch::runtime::IntArrayRef; +using executorch::runtime::KernelRuntimeContext; +using executorch::runtime::kTensorDimensionLimit; +using executorch::runtime::resize_tensor; +using executorch::runtime::tensors_have_same_dim_order; +using torch::executor::check_permute_copy_args; +using torch::executor::Error; +using torch::executor::get_permute_copy_out_target_size; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +namespace { + +void increment_coordinate_permuted( + const Tensor& tensor, + size_t* const coordinate, + IntArrayRef dims) { + for (int i = dims.size() - 1; i >= 0; i--) { + size_t d = dims[i] >= 0 ? dims[i] : dims[i] + tensor.dim(); + coordinate[d]++; + if (coordinate[d] == tensor.size(d)) { + coordinate[d] = 0; + } else { + return; + } + } +} + +} // namespace + +Tensor& permute_copy_out( + KernelRuntimeContext& ctx, + const Tensor& in, + IntArrayRef dims, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK( + ctx, check_permute_copy_args(in, dims, out), InvalidArgument, out); + + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out); + + Tensor::SizesType expected_out_size[kTensorDimensionLimit]; + size_t expected_out_dim = 0; + get_permute_copy_out_target_size( + in, dims, expected_out_size, &expected_out_dim); + ET_KERNEL_CHECK( + ctx, + resize_tensor(out, {expected_out_size, expected_out_dim}) == Error::Ok, + InvalidArgument, + out); + + const auto in_type = out.scalar_type(); + + constexpr auto name = "permute_copy.out"; + constexpr int kNnlibMaxDim = 16; + + bool optimized = false; + + if (out.scalar_type() == ScalarType::Float || + out.scalar_type() == ScalarType::Char || + out.scalar_type() == ScalarType::Byte) + optimized = true; + + if (in.dim() > kNnlibMaxDim) + optimized = false; + + if (optimized) { + if (in_type == ScalarType::Float) { + WORD32* p_inp = (WORD32*)in.const_data_ptr(); + WORD32* p_out = (WORD32*)out.mutable_data_ptr(); + + WORD32 num_inp_dims = in.dim(); + WORD32 num_out_dims = num_inp_dims; + + WORD32 p_inp_shape[kNnlibMaxDim]; + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_permute_vec[kNnlibMaxDim]; + + for (int i = 0; i < num_inp_dims; i++) { + p_inp_shape[i] = in.size(i); + p_out_shape[i] = in.size(dims[i]); + p_permute_vec[i] = dims[i]; + } + + WORD32 ret_val = xa_nn_transpose_32_32( + p_out, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + } else if (in_type == ScalarType::Char) { + WORD8* p_inp = (WORD8*)in.const_data_ptr(); + WORD8* p_out = (WORD8*)out.mutable_data_ptr(); + + WORD32 num_inp_dims = in.dim(); + WORD32 num_out_dims = num_inp_dims; + + WORD32 p_inp_shape[kNnlibMaxDim]; + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_permute_vec[kNnlibMaxDim]; + + for (int i = 0; i < num_inp_dims; i++) { + p_inp_shape[i] = in.size(i); + p_out_shape[i] = in.size(dims[i]); + p_permute_vec[i] = dims[i]; + } + + WORD32 val = xa_nn_transpose_8_8( + p_out, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + 
num_inp_dims); + + ET_KERNEL_CHECK(ctx, val == 0, Internal, out); + + } else if (in_type == ScalarType::Byte) { + WORD8* p_inp = (WORD8*)in.const_data_ptr(); + WORD8* p_out = (WORD8*)out.mutable_data_ptr(); + + WORD32 num_inp_dims = in.dim(); + WORD32 num_out_dims = num_inp_dims; + + WORD32 p_inp_shape[kNnlibMaxDim]; + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_permute_vec[kNnlibMaxDim]; + + for (int i = 0; i < num_inp_dims; i++) { + p_inp_shape[i] = in.size(i); + p_out_shape[i] = in.size(dims[i]); + p_permute_vec[i] = dims[i]; + } + + WORD32 val = xa_nn_transpose_8_8( + p_out, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + ET_KERNEL_CHECK(ctx, val == 0, Internal, out); + } + return out; + } + + size_t in_coord[kTensorDimensionLimit] = {0}; + size_t trailing_dims_memo[kTensorDimensionLimit]; + executorch::runtime::memoizeTrailingDims(in, trailing_dims_memo); + + // in and out must be the same dtype + ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] { + const CTYPE* const in_data = in.const_data_ptr(); + CTYPE* const out_data = out.mutable_data_ptr(); + + for (size_t i = 0; i < out.numel(); ++i) { + out_data[i] = + in_data[executorch::runtime::coordinateToIndexWithTrailingDimsMemo( + in, in_coord, trailing_dims_memo)]; + increment_coordinate_permuted(in, in_coord, dims); + } + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_pow.cpp b/backends/cadence/hifi/operators/op_pow.cpp index 9669e96123..74c24afbc0 100644 --- a/backends/cadence/hifi/operators/op_pow.cpp +++ b/backends/cadence/hifi/operators/op_pow.cpp @@ -120,9 +120,11 @@ Tensor& pow_Tensor_Tensor_out( if (optimized) { if (broadcast) { WORD32* __restrict__ ptr1 = - (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32)); + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(int)); WORD32* __restrict__ ptr2 = - (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32)); + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(int)); WORD32* __restrict__ pin1 = (WORD32* __restrict__)a.const_data_ptr(); @@ -155,7 +157,8 @@ Tensor& pow_Tensor_Tensor_out( free(ptr2); } else if (a_is_broadcasted && (!b_is_broadcasted)) { FLOAT32* __restrict__ ptr1 = - (FLOAT32* __restrict__)malloc((num_elm + 2) * sizeof(WORD32)); + (FLOAT32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(int)); FLOAT32* __restrict__ pin1 = (FLOAT32* __restrict__)a.const_data_ptr(); @@ -182,7 +185,8 @@ Tensor& pow_Tensor_Tensor_out( free(ptr1); } else if (b_is_broadcasted && (!a_is_broadcasted)) { WORD32* __restrict__ ptr1 = - (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32)); + (WORD32* __restrict__)kernels::allocate_temp_memory( + ctx, num_elm * sizeof(int)); WORD32* __restrict__ pin1 = (WORD32* __restrict__)b.const_data_ptr(); @@ -351,4 +355,3 @@ Tensor& pow_Scalar_out( } // namespace HiFi } // namespace impl } // namespace cadence - diff --git a/backends/cadence/hifi/operators/op_remainder.cpp b/backends/cadence/hifi/operators/op_remainder.cpp new file mode 100644 index 0000000000..7fba5a5385 --- /dev/null +++ b/backends/cadence/hifi/operators/op_remainder.cpp @@ -0,0 +1,258 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
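A reference sketch of the index mapping used by the optimized permute_copy path above: the output shape is built as out_shape[i] = in.size(dims[i]), and each output coordinate reads the input element whose coordinate along axis dims[i] equals the output's i-th coordinate. permute3 below is a rank-3 illustration, not the xa_nn_transpose kernel.

#include <cstdio>
#include <vector>

// Reference permute for a rank-3 tensor stored row-major.
std::vector<float> permute3(
    const std::vector<float>& in, const int in_shape[3], const int dims[3]) {
  int out_shape[3];
  for (int i = 0; i < 3; ++i) out_shape[i] = in_shape[dims[i]];

  // Row-major strides of the input.
  int in_stride[3] = {in_shape[1] * in_shape[2], in_shape[2], 1};

  std::vector<float> out(in.size());
  size_t idx = 0;
  for (int a = 0; a < out_shape[0]; ++a)
    for (int b = 0; b < out_shape[1]; ++b)
      for (int c = 0; c < out_shape[2]; ++c) {
        int coord[3] = {a, b, c};  // output coordinate
        // Map each output axis back to the input axis it came from.
        int offset = coord[0] * in_stride[dims[0]] +
                     coord[1] * in_stride[dims[1]] +
                     coord[2] * in_stride[dims[2]];
        out[idx++] = in[offset];
      }
  return out;
}

int main() {
  const int in_shape[3] = {2, 1, 3};
  const int dims[3] = {2, 0, 1};  // output axis i reads input axis dims[i]
  std::vector<float> in = {0, 1, 2, 3, 4, 5};
  auto out = permute3(in, in_shape, dims);  // shape becomes {3, 2, 1}
  for (float v : out) std::printf("%g ", v);  // 0 3 1 4 2 5
  std::printf("\n");
  return 0;
}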
+ */ + +#include + +#include +#include +#include +#include +#include + +#include "kernels.h" + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::canCast; +using executorch::runtime::promoteTypes; +using torch::executor::apply_binary_elementwise_fn; +using torch::executor::apply_unary_map_fn; +using torch::executor::Error; +using torch::executor::native::utils::extract_scalar; +using torch::executor::native::utils::get_scalar_dtype; +using torch::executor::native::utils::promote_type_with_scalar; +using torch::executor::native::utils::remainder_override; +using torch::executor::resize_to_broadcast_target_size; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; + +namespace { +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct RemainderInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct RemainderInner { + static void run(const Tensor& a, const Tensor& b, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = remainder_override(a_casted, b_casted); + + return static_cast(value); + }, + a, + b, + out); + } +}; + +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct RemainderInner + : public ReportCanCastBug {}; + +} // namespace +Tensor& remainder_Tensor_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + Tensor& out) { + (void)ctx; + + constexpr int kNnlibMaxDim = + 4; /*fallback to not optimised if broadcast and dim > 4 */ + + bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + bool broadcast = (a_is_broadcasted || b_is_broadcasted); + + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + bool optimized = true; + + if((a.scalar_type() != ScalarType::Float)||(b.scalar_type() != ScalarType::Float)) + optimized = false; + + if ((broadcast == true) && (max_dim > kNnlibMaxDim)) + optimized = false; + + if(optimized) + { + FLOAT32 * __restrict__ p_out = (FLOAT32 * __restrict__ )out.mutable_data_ptr(); + const FLOAT32 * __restrict__ p_inp1 = (const FLOAT32 * __restrict__)a.const_data_ptr(); + const FLOAT32 * __restrict__ p_inp2 = (const FLOAT32 * __restrict__)b.const_data_ptr(); + + if(broadcast) + { + WORD32 p_out_shape[kNnlibMaxDim]; + WORD32 p_inp1_shape[kNnlibMaxDim]; + WORD32 p_inp2_shape[kNnlibMaxDim]; + + for(int i = 0; i < kNnlibMaxDim; i++) + { + p_inp1_shape[i] = 1; + p_inp2_shape[i] = 1; + p_out_shape[i] = 1; + } + + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for(int i = 0; i < out.dim(); i++) + p_out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + p_inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + p_inp2_shape[i+off_b] = b.size(i); + + WORD32 ret_val = xa_nn_elm_remainder_broadcast_4D_f32xf32_f32(p_out, + p_out_shape, + p_inp1, + p_inp1_shape, + p_inp2, + p_inp2_shape); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + else{ + WORD32 ret_val = xa_nn_elm_remainder_f32xf32_f32(p_out, + p_inp1, + p_inp2, + out.numel()); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + return out; + } + // Determine output size and resize for dynamic shapes + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND( + Bool, a_type, ctx, "remainder.Tensor_out", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND( + Bool, b_type, ctx, "remainder.Tensor_out", CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REAL_TYPES( + out_type, ctx, "remainder.Tensor_out", CTYPE_OUT, [&]() { + RemainderInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out); + }); + }); + }); + + return out; +} + +Tensor& remainder_Scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = get_scalar_dtype(b); + ScalarType common_type = promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND( + Bool, a_type, ctx, "remainder.Scalar_out", CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES( + b_type, ctx, "remainder.Scalar_out", CTYPE_B, [&]() { + CTYPE_B val_b = 0; + extract_scalar(b, &val_b); + ET_SWITCH_REAL_TYPES( + common_type, ctx, "remainder.Scalar_out", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES( + out_type, + ctx, + "remainder.Scalar_out", + CTYPE_OUT, + [&]() { + apply_unary_map_fn( + [val_b](const CTYPE_A val_a) { + CTYPE_IN a_casted = + static_cast(val_a); + CTYPE_IN b_casted = + 
static_cast(val_b); + CTYPE_IN value = remainder_override( + a_casted, b_casted); + + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence diff --git a/backends/cadence/hifi/operators/op_softmax.cpp b/backends/cadence/hifi/operators/op_softmax.cpp new file mode 100644 index 0000000000..a2068fd15b --- /dev/null +++ b/backends/cadence/hifi/operators/op_softmax.cpp @@ -0,0 +1,195 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include + +#include +#include +#include +#include +#include "kernels.h" + +using Tensor = exec_aten::Tensor; +using exec_aten::ScalarType; +using executorch::runtime::KernelRuntimeContext; +using torch::executor::Error; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +Tensor& softmax_out( + KernelRuntimeContext& ctx, + const Tensor& in, + int64_t dim, + bool half_to_float, + Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK( + ctx, + torch::executor::check_softmax_args(in, dim, half_to_float, out), + InvalidArgument, + out); + + ET_KERNEL_CHECK( + ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out); + + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensors_have_same_dim_order(in, out), + InvalidArgument, + out); + + // Adjust for negative dim + dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim; + + const exec_aten::optional& dim_t = dim; + const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim()); + const size_t size = in.size(d); + + size_t stride = 1, outer_size = 1; + + size_t outer_stride = 1; + + constexpr auto name = "_softmax.out"; + constexpr int kNnlibMaxDim = 16; + + bool optimized = true; + + if (out.scalar_type() != ScalarType::Float) + optimized = false; + + if (in.dim() > kNnlibMaxDim) + optimized = false; + + if (optimized) { + int* p_inp = (int*)in.const_data_ptr(); + int* out_data = (int*)out.mutable_data_ptr(); + + int num_inp_dims = in.dim(); + int num_out_dims = num_inp_dims; + + int p_inp_shape[kNnlibMaxDim]; + int p_out_shape[kNnlibMaxDim]; + int p_permute_vec[kNnlibMaxDim]; + + for (int i = 0; i < num_inp_dims; i++) + p_inp_shape[i] = in.size(i); + + for (int i = 0; i < num_inp_dims; i++) { + if (i == d) + p_permute_vec[i] = num_inp_dims - 1; + else if (i == (num_inp_dims - 1)) + p_permute_vec[num_inp_dims - 1] = d; + else + p_permute_vec[i] = i; + + p_out_shape[i] = p_inp_shape[p_permute_vec[i]]; + + if (i != d) + outer_size = outer_size * p_inp_shape[i]; + } + + outer_stride = size; + + int* p_out = + (int*)kernels::allocate_temp_memory(ctx, out.numel() * sizeof(int)); + int* p_out1 = + (int*)kernels::allocate_temp_memory(ctx, out.numel() * sizeof(int)); + + WORD32 ret_val = xa_nn_transpose_32_32( + p_out, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + for (size_t outer_idx = 0; outer_idx < outer_size; ++outer_idx) { + size_t outer = outer_idx * outer_stride; + for (size_t inner_idx = 0; inner_idx < stride; ++inner_idx) { + size_t base = outer + inner_idx; + + float* p_in_data = (float*)&p_out[base]; + float* p_out_data = (float*)&p_out1[base]; + + ret_val = xa_nn_vec_softmax_f32_f32(p_out_data, p_in_data, size); + + 
ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + } + } + + ret_val = xa_nn_transpose_32_32( + out_data, + p_inp_shape, + p_out1, + p_out_shape, + p_permute_vec, + num_out_dims, + num_inp_dims); + + ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out); + + return out; + } + + ET_SWITCH_FLOATH_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() { + const CTYPE* const in_data = in.const_data_ptr(); + CTYPE* const out_data = out.mutable_data_ptr(); + + torch::executor::apply_over_dim( + [in_data, out_data]( + const size_t size, const size_t stride, const size_t base) { + // calculate max in softmax dim. During softmax computation each + // value is subtracted by the maximum in value before calling exp + // to preserve numerical stability. + const CTYPE max_in = torch::executor::apply_unary_reduce_fn( + [](const CTYPE val_in, CTYPE val_accum) { + return std::max(val_in, val_accum); + }, + in_data + base, + size, + stride); + + const CTYPE temp_sum = + torch::executor::apply_unary_map_reduce_fn( + [max_in](const CTYPE val_in) { + return std::exp(val_in - max_in); + }, + [](const CTYPE mapped_in, CTYPE val_accum) { + return val_accum + mapped_in; + }, + in_data + base, + size, + stride); + + torch::executor::apply_unary_map_fn( + [max_in, temp_sum](const CTYPE val_in) { + return std::exp(val_in - max_in) / temp_sum; + }, + in_data + base, + out_data + base, + size, + stride); + }, + in, + dim); + }); + + return out; +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence diff --git a/backends/cadence/hifi/operators/op_where.cpp b/backends/cadence/hifi/operators/op_where.cpp index 06bd0bc3c9..c4ad8177cf 100644 --- a/backends/cadence/hifi/operators/op_where.cpp +++ b/backends/cadence/hifi/operators/op_where.cpp @@ -109,8 +109,10 @@ Tensor& where_out( if (con_shape[0] != out_shape[0] || con_shape[1] != out_shape[1] || con_shape[2] != out_shape[2] || con_shape[3] != out_shape[3]) { - void* p_scratch = - malloc(out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]); + void* p_scratch = (void*)kernels::allocate_temp_memory( + ctx, + (out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]) * + sizeof(int)); const unsigned char* p_brd_cond = (const unsigned char*)p_scratch; xa_nn_broadcast_8_8( (WORD8* __restrict__)p_brd_cond, diff --git a/backends/cadence/hifi/operators/quantized_relu_out.cpp b/backends/cadence/hifi/operators/quantized_relu_out.cpp new file mode 100644 index 0000000000..6b7fae6e05 --- /dev/null +++ b/backends/cadence/hifi/operators/quantized_relu_out.cpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
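A minimal sketch of the per-row step in softmax_out above: once the softmax dimension has been transposed to the innermost position, each contiguous row gets a numerically stable softmax (subtract the row max, exponentiate, normalize), which is the computation both the vectorized path and the portable fallback perform.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax over one contiguous row of `size` elements.
void softmax_row(float* out, const float* in, size_t size) {
  float max_in = in[0];
  for (size_t i = 1; i < size; ++i) max_in = std::max(max_in, in[i]);
  float sum = 0.f;
  for (size_t i = 0; i < size; ++i) {
    out[i] = std::exp(in[i] - max_in);  // shift by the max before exp
    sum += out[i];
  }
  for (size_t i = 0; i < size; ++i) out[i] /= sum;
}

int main() {
  std::vector<float> in = {1.f, 2.f, 3.f};
  std::vector<float> out(in.size());
  softmax_row(out.data(), in.data(), in.size());
  for (float v : out) std::printf("%f ", v);  // ~0.090 0.245 0.665
  std::printf("\n");
  return 0;
}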
+ */ + +#include +#include + +using Tensor = exec_aten::Tensor; +using KernelRuntimeContext = torch::executor::KernelRuntimeContext; +using ScalarType = exec_aten::ScalarType; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +template +void quantized_relu_( + const Tensor& input, + const Tensor& in_zero_point, + const int64_t out_zero_point, + const Tensor& out_multiplier, + const Tensor& out_shift, + Tensor& output) { + T q_zero_point = in_zero_point.const_data_ptr()[0]; + const T* __restrict__ in = input.const_data_ptr(); + T* __restrict__ out = output.mutable_data_ptr(); + + const int32_t* __restrict__ out_multiplier_data = + out_multiplier.const_data_ptr(); + const int32_t* __restrict__ out_shift_data = + out_shift.const_data_ptr(); + + // Compute the out_scale from out_multiplier and out_shift + const float out_scale = + -out_multiplier_data[0] * 1.0 / (1 << 31) * pow(2, out_shift_data[0]); + + for (size_t i = 0, e = input.numel(); i < e; ++i) { + float temp = in[i] > q_zero_point ? (in[i] - q_zero_point) : 0; + out[i] = kernels::quantize(temp, out_scale, (int32_t)out_zero_point); + } +} + +void quantized_relu_out( + KernelRuntimeContext& ctx, + const Tensor& input, + const Tensor& in_zero_point, + const int64_t out_zero_point, + const Tensor& out_multiplier, + const Tensor& out_shift, + Tensor& output) { + if (input.scalar_type() == executorch::aten::ScalarType::Byte) { + const uint8_t* p_in = input.const_data_ptr(); + uint8_t* p_out = output.mutable_data_ptr(); + uint8_t q_zero_point = in_zero_point.const_data_ptr()[0]; + + WORD32 ret_val = xa_nn_vec_relu_asym8u_asym8u( + p_out, + p_in, + (int)q_zero_point, + out_multiplier.const_data_ptr()[0], + out_shift.const_data_ptr()[0], + (int)out_zero_point, + (int)out_zero_point, + 255, + input.numel()); + + ET_CHECK_MSG(ret_val == 0, "An internal error occured"); + + } else if (input.scalar_type() == executorch::aten::ScalarType::Char) { + const int8_t* p_in = input.const_data_ptr(); + int8_t* p_out = output.mutable_data_ptr(); + int8_t q_zero_point = in_zero_point.const_data_ptr()[0]; + + WORD32 ret_val = xa_nn_vec_relu_asym8s_asym8s( + p_out, + p_in, + (int)q_zero_point, + out_multiplier.const_data_ptr()[0], + out_shift.const_data_ptr()[0], + (int)out_zero_point, + (int)out_zero_point, + 127, + input.numel()); + + ET_CHECK_MSG(ret_val == 0, "An internal error occured"); + + } else { + ET_CHECK_MSG( + false, + "Unhandled input dtype %hhd", + static_cast(input.scalar_type())); + } +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c new file mode 100644 index 0000000000..244f404d2e --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c @@ -0,0 +1,172 @@ +#include "xa_type_def.h" +#include "xa_nn_common.h" +#include "xa_nnlib_kernels_api.h" +#include "xa_nnlib_common_macros.h" +#include "xa_nnlib_err_chk.h" +#include "xa_nnlib_common.h" + +WORD32 xa_nn_concat_32_32(WORD32 * __restrict__ p_out + ,const WORD32 *const p_out_shape + ,const WORD32 **pp_inps + ,const WORD32 *const *pp_inps_shape + ,WORD32 num_out_dims + ,WORD32 num_inp + ,WORD32 num_inp_dims + ,WORD32 axis) +{ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1); + XA_NNLIB_ARG_CHK_PTR(pp_inps, -1); + XA_NNLIB_ARG_CHK_PTR(pp_inps_shape, -1); + /* Pointer alignment checks */ + 
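+  /* Note: only the shape arrays and input pointer tables are required to be
+   * aligned up front; the data pointers are inspected again inside the copy
+   * loops below so that an unaligned-capable path can be selected at run time. */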
XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(pp_inps, sizeof(WORD32 *), -1); + XA_NNLIB_ARG_CHK_ALIGN(pp_inps_shape, sizeof(WORD32 *), -1); + //Validate Arguments + XA_NNLIB_ARG_CHK_COND((num_out_dims <= 0 || num_out_dims > 6), -1); + XA_NNLIB_ARG_CHK_COND((num_inp <= 0 || num_inp > 10), -1); + XA_NNLIB_ARG_CHK_COND((num_inp_dims != num_out_dims), -1); + XA_NNLIB_ARG_CHK_COND((axis < -num_out_dims || axis >= num_out_dims), -1); + + int i = 0, j = 0; + for(i = 0; i < num_out_dims; i++) + { + XA_NNLIB_ARG_CHK_COND((p_out_shape[i] <= 0), -1); + } + + if(axis < 0) + axis = num_out_dims + axis; + + WORD32 concat_size = 0; + for (i = 0; i < num_inp; i++) + { + XA_NNLIB_ARG_CHK_PTR(pp_inps[i], -1); + XA_NNLIB_ARG_CHK_PTR(pp_inps_shape[i], -1); + XA_NNLIB_ARG_CHK_ALIGN(pp_inps_shape[i], sizeof(WORD32), -1); +#pragma loop_count min=1 + for(j = 0; j < num_out_dims; j++) + { + XA_NNLIB_ARG_CHK_COND((pp_inps_shape[i][j] != p_out_shape[j] && j != axis), -1); + } + + XA_NNLIB_ARG_CHK_COND((pp_inps_shape[i][axis] <= 0), -1); + concat_size += pp_inps_shape[i][axis]; + } + + XA_NNLIB_ARG_CHK_COND((p_out_shape[axis] != concat_size), -1); + + //Calculate outer and inner size for axis + WORD32 outer_size = 1; +#pragma no_simd + for(int i = 0; i < axis; i++) + { + outer_size *= p_out_shape[i]; + } + + WORD32 base_inner_size = 1; +#pragma no_simd + for(int i = axis + 1; i < num_out_dims; i++) + { + base_inner_size *= p_out_shape[i]; + } + + WORD32 *ptmp_out = p_out; + for(int i = 0; i < num_inp; i++) + { + const WORD32 copy_size = pp_inps_shape[i][axis] * base_inner_size; + WORD32 *output_ptr = ptmp_out; + const WORD32* input_ptr = pp_inps[i]; + + if(((copy_size & 1) == 0) && (((concat_size * base_inner_size) & 1) == 0) + && (((unsigned)input_ptr & 1) == 0) && (((unsigned)output_ptr & 1) == 0)) + { + if(copy_size <= 8) + { + const ae_f32 *pae_inp = (const ae_f32 *)input_ptr; + for(int k = 0; k < outer_size; k++) + { + ae_f32 *pae_out = (ae_f32 *)output_ptr; +#pragma concurrent +#pragma no_simd + for(int ic = 0; ic < copy_size; ic++) + { + *pae_out++ = *pae_inp++; + } + output_ptr += concat_size * base_inner_size; + } + } + else + { + for(int k = 0; k < outer_size; k++) + { + const ae_int32x2 *pae_inp = (const ae_int32x2 *)input_ptr; + ae_int32x2 *pae_out = (ae_int32x2 *)output_ptr; + ae_valign inp_a, out_a; + inp_a = AE_LA64_PP(pae_inp); + out_a = AE_ZALIGN64(); + for(int ic = 0; ic < (copy_size >> 1); ic++) + { + ae_int32x2 d0; + AE_LA32X2_IP(d0, inp_a, pae_inp); + AE_SA32X2_IP(d0, out_a, pae_out); + } + AE_SA64POS_FP(out_a, pae_out); + const ae_f32 *puae_inp = (const ae_f32 *)pae_inp; + ae_f32 *puae_out = (ae_f32 *)pae_out; +#pragma concurrent + for(int ic = 0; ic < (copy_size & 1); ic++) + { + puae_out[copy_size - 1] = puae_inp[copy_size - 1]; + } + input_ptr += copy_size; + output_ptr += concat_size * base_inner_size; + } + } + } + else + { + if(copy_size <= 6) + { + for(int k = 0; k < outer_size; k++) + { +#pragma concurrent +#pragma no_unroll + for(int ic = 0; ic < copy_size; ic++) + { + output_ptr[ic] = *input_ptr++; + } + output_ptr += concat_size * base_inner_size; + } + } + else + { + for(int k = 0; k < outer_size; k++) + { + const ae_int32x2 *pae_inp = (const ae_int32x2 *)input_ptr; + ae_int32x2 *pae_out = (ae_int32x2 *)output_ptr; + ae_valign inp_a, out_a; + inp_a = AE_LA64_PP(pae_inp); + out_a = AE_ZALIGN64(); + +#pragma concurrent + for(int ic = 0; ic < copy_size >> 1; ic++) + { + ae_int32x2 d0; + AE_LA32X2_IP(d0, inp_a, pae_inp); + AE_SA32X2_IP(d0, out_a, 
pae_out); + } + AE_SA64POS_FP(out_a, pae_out); + + for(int ic = 0; ic < (copy_size & 1); ic++) + { + output_ptr[copy_size - 1] = input_ptr[copy_size - 1]; + } + input_ptr += copy_size; + output_ptr += concat_size * base_inner_size; + } + } + } + ptmp_out += copy_size; + } + return 0; +} \ No newline at end of file diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_atan2_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_atan2_f32.c new file mode 100644 index 0000000000..6f95360ed9 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_atan2_f32.c @@ -0,0 +1,882 @@ +/* ------------------------------------------------------------------------ */ +/* Copyright (c) 2018 by Cadence Design Systems, Inc. ALL RIGHTS RESERVED. */ +/* These coded instructions, statements, and computer programs ("Cadence */ +/* Libraries") are the copyrighted works of Cadence Design Systems Inc. */ +/* Cadence IP is licensed for use with Cadence processor cores only and */ +/* must not be used for any other processors and platforms. Your use of the */ +/* Cadence Libraries is subject to the terms of the license agreement you */ +/* have entered into with Cadence Design Systems, or a sublicense granted */ +/* to you by a direct Cadence licensee. */ +/* ------------------------------------------------------------------------ */ +/* IntegrIT, Ltd. www.integrIT.com, info@integrIT.com */ +/* */ +/* DSP Library */ +/* */ +/* This library contains copyrighted materials, trade secrets and other */ +/* proprietary information of IntegrIT, Ltd. This software is licensed for */ +/* use with Cadence processor cores only and must not be used for any other */ +/* processors and platforms. The license to use these sources was given to */ +/* Cadence, Inc. under Terms and Condition of a Software License Agreement */ +/* between Cadence, Inc. and IntegrIT, Ltd. */ +/* ------------------------------------------------------------------------ */ +/* Copyright (C) 2015-2018 IntegrIT, Limited. */ +/* All Rights Reserved. */ +/* ------------------------------------------------------------------------ */ +#include + +#include "../include/NatureDSP_Signal_math.h" +#include "NatureDSP_types.h" +#include "xa_nn_common.h" + +/* Common helper macros. 
*/ +#include "xa_nnlib_common_fpu.h" + +#include "xa_nnlib_common.h" + +const union ufloat32uint32 xa_nnlib_plusInff ={0x7f800000}; +const union ufloat32uint32 xa_nnlib_qNaNf = { 0x7fc00000 }; +const union ufloat32uint32 pif ={0x40490fdb}; /* pi */ +const union ufloat32uint32 pi2f={0x3fc90fdb}; /* pi/2 */ + +const union ufloat32uint32 ALIGN(8) xa_nnlib_atanftbl1[8] = +{ + {0x3dbc14c0},/* 9.183645248413086e-002 */ + {0xbe30c39c},/*-1.726211905479431e-001 */ + {0x3b2791e4},/* 2.556913532316685e-003 */ + {0x3e4dac9d},/* 2.008537799119949e-001 */ + {0xb97d9a57},/*-2.418545627733693e-004 */ + {0xbeaaa7b5},/*-3.333107531070709e-001 */ + {0xb54f34c8},/*-7.719031600572635e-007 */ + {0x31cf3fa2} /* 6.031727117772334e-009 */ +}; + +const union ufloat32uint32 ALIGN(8) xa_nnlib_atanftbl2[8]= +{ + {0xbcccc037},/*-2.499399892985821e-002 */ + {0x3e217c35},/* 1.577003747224808e-001 */ + {0xbecf4163},/*-4.047957360744476e-001 */ + {0x3ef7b762},/* 4.838209748268127e-001 */ + {0xbdf35059},/*-1.188055947422981e-001 */ + {0xbe9b8b75},/*-3.037983477115631e-001 */ + {0xbb80ed5c},/*-3.934545442461968e-003 */ + {0x3956fc52} /* 2.050262701231986e-004 */ +}; + +#if !HAVE_VFPU && !HAVE_FPU +DISCARD_FUN(void, xa_nn_elm_atan2_f32,( FLOAT32 * z, const FLOAT32 * y, const FLOAT32 * x, int N )) +#elif HAVE_VFPU +#define sz_f32 (int)sizeof(FLOAT32) + +/*=========================================================================== + Vector matematics: + vec_atan2 full quadrant Arctangent +===========================================================================*/ + +/*------------------------------------------------------------------------- + Full-Quadrant Arc Tangent + The functions compute the arc tangent of the ratios y[N]/x[N] and store the + result to output vector z[N]. + Floating point functions output is in radians. Fixed point functions + scale its output by pi. + + NOTE: + 1. Scalar floating point function is compatible with standard ANSI C routines and set + errno and exception flags accordingly + 2. Scalar floating point function assigns EDOM to errno whenever y==0 and x==0. + + Accuracy: + 24 bit version: 768 (3.57e-7) + floating point: 2 ULP + + Special cases: + y | x | result | extra conditions + --------|-------|-----------|--------------------- + +/-0 | -0 | +/-pi | + +/-0 | +0 | +/-0 | + +/-0 | x | +/-pi | x<0 + +/-0 | x | +/-0 | x>0 + y | +/-0 | -pi/2 | y<0 + y | +/-0 | pi/2 | y>0 + +/-y | -inf | +/-pi | finite y>0 + +/-y | +inf | +/-0 | finite y>0 + +/-inf | x | +/-pi/2 | finite x + +/-inf | -inf | +/-3*pi/4 | + +/-inf | +inf | +/-pi/4 | + + Input: + y[N] vector of numerator values, Q31 or floating point + x[N] vector of denominator values, Q31 or floating point + N length of vectors + Output: + z[N] results, Q31 or floating point + +---------------------------------------------------------------------------*/ + +void xa_nn_elm_atan2_f32( FLOAT32 * z, const FLOAT32 * y, const FLOAT32 * x, WORD32 N ) +{ + /* + const union ufloat32uint32* p; + int sx,sy,big; + sx=takesignf(x); + sy=takesignf(y); + x=fabs(x); + y=fabs(y); + if(x==0.f && y==0.f) + { + // The actual result depends on input signs. + x = 1.f; + y = 0.f; + } + + big=x>y; + if(big) + { + x=y/x; + } + else + { + // compare x==y is necessary to support (+/-Inf, +/-Inf) cases + x = (x == y) ? 1.0f : x / y; + } + p = (x<0.5f) ? 
atanftbl1 : atanftbl2; + // approximate atan(x)/x-1 + y = p[0].f; + y = x*y + p[1].f; + y = x*y + p[2].f; + y = x*y + p[3].f; + y = x*y + p[4].f; + y = x*y + p[5].f; + y = x*y + p[6].f; + y = x*y + p[7].f; + // convert result to true atan(x) + y = x*y + x; + + if (!big) y = pi2f.f - y; + if (sx) y = pif.f - y; + if (sy) y = -y; + return y; + */ + + const xtfloatx2 * X; + const xtfloatx2 * Y; + xtfloatx2 * restrict Z; + const xtfloatx2 * S_rd; + xtfloatx2 * restrict S_wr; + + ae_valign X_va, Y_va, Z_va; + + /* Current block index; overall number of blocks; number of values in the current block */ + int blkIx, blkNum, blkLen; + /* Block size, blkLen <= blkSize */ + const int blkSize = MAX_ALLOCA_SZ/sz_f32; + /* Allocate a fixed-size scratch area on the stack. */ + FLOAT32 ALIGN(8) scr[blkSize]; + + int n; + + if ( N<=0 ) return; + + NASSERT_ALIGN8( scr ); + + /* + * Data are processed in blocks of scratch area size. Further, the algorithm + * implementation is splitted in order to feed the optimizing compiler with a + * few loops of managable size. + */ + + blkNum = ( N + blkSize-1 )/blkSize; + + for ( blkIx=0; blkIxy0 ) p0 = y0/x0; + * // Special case of x==y is necessary to support (+/-Inf, +/-Inf) cases. + * else p0 = ( x0==y0 ? 1.f : x0/y0 ); + * + * scr[n] = p0; + * } + * } + */ + + { + /* Input values */ + xtfloatx2 x0, y0; + /* Numerator; denominator; reciprocal; quotient */ + xtfloatx2 num, den, rcp, quo; + /* Scaling factor; error term */ + xtfloatx2 scl, eps; + /* Is NaN; Inf/Inf; x/Inf; 0/0; x and y are subnormal */ + xtbool2 b_nan, b_num_inf, b_den_inf, b_eqz, b_subn; + + X = (xtfloatx2*)( (uintptr_t)x + blkIx*blkSize*sz_f32 ); + Y = (xtfloatx2*)( (uintptr_t)y + blkIx*blkSize*sz_f32 ); + S_wr = (xtfloatx2*)scr; + + X_va = XT_LASX2PP( X ); + Y_va = XT_LASX2PP( Y ); + + __Pragma( "loop_count min=1" ); + for ( n=0; n<(blkLen+1)/2; n++ ) + { + XT_LASX2IP( x0, X_va, X ); + XT_LASX2IP( y0, Y_va, Y ); + + /* Replicate NaNs in both x and y to ensure NaN propagation. */ + b_nan = XT_UN_SX2( x0, y0 ); + XT_MOVT_SX2( x0, xa_nnlib_qNaNf.f, b_nan ); + XT_MOVT_SX2( y0, xa_nnlib_qNaNf.f, b_nan ); + + x0 = XT_ABS_SX2( x0 ); + y0 = XT_ABS_SX2( y0 ); + + /* num <= den */ + num = XT_MIN_SX2( x0, y0 ); + den = XT_MAX_SX2( y0, x0 ); + + /* Scale up numerator and denominator if BOTH are subnormal. */ + b_subn = XT_OLT_SX2( num, FLT_MIN ); + scl = (xtfloatx2)8388608.f; XT_MOVF_SX2( scl, (xtfloatx2)1.0f, b_subn ); + num = XT_MUL_SX2( num, scl ); + den = XT_MUL_SX2( den, scl ); + + /* Classify numerator and denominator. */ + b_num_inf = XT_OEQ_SX2( num, xa_nnlib_plusInff.f ); /* Inf/Inf */ + b_den_inf = XT_OEQ_SX2( den, xa_nnlib_plusInff.f ); /* x/Inf */ + b_eqz = XT_OEQ_SX2( den, (xtfloatx2)(xtfloatx2)(0.0f) ); /* 0/0 */ + + /* Initial appromimation for 1/den. */ + rcp = XT_RECIP0_SX2( den ); + /* Newton-Raphson iteration for 1/den. */ + eps = (xtfloatx2)1.0f; + XT_MSUB_SX2( eps, rcp, den ); + XT_MADD_SX2( rcp, rcp, eps ); + /* Approximation for the quotient num/den. */ + quo = XT_MUL_SX2( num, rcp ); + /* Refine the quotient by a modified Newton-Raphson iteration. */ + eps = num; + XT_MSUB_SX2( eps, quo, den ); + XT_MADD_SX2( quo, rcp, eps ); + + /* Force conventional results for special cases. 
*/ + XT_MOVT_SX2( quo, (xtfloatx2)(0.0f), b_den_inf ); /* x/Inf -> 0 */ + XT_MOVT_SX2( quo, (xtfloatx2)1.0f, b_num_inf ); /* Inf/Inf -> 1 */ + XT_MOVT_SX2( quo, (xtfloatx2)(0.0f), b_eqz ); /* 0/0 -> 0 */ + + XT_SSX2IP( quo, S_wr, +2*sz_f32 ); + } + } + + __Pragma( "no_reorder" ); + + /* + * Part II, polynomial approximation and full quadrant restoration. + * Reference C code: + * + * { + * const union ufloat32uint32 * ptbl; + * float32_t x0, y0, z0, p0; + * int sx, sy; + * + * for ( n=0; n0 + y | +/-0 | -pi/2 | y<0 + y | +/-0 | pi/2 | y>0 + +/-y | -inf | +/-pi | finite y>0 + +/-y | +inf | +/-0 | finite y>0 + +/-inf | x | +/-pi/2 | finite x + +/-inf | -inf | +/-3*pi/4 | + +/-inf | +inf | +/-pi/4 | + +Input: + y[N] input data, Q15 or floating point + x[N] input data, Q15 or floating point + N length of vectors +Output: + z[N] result, Q15 or floating point + +Restrictions: +x, y, z should not overlap +---------------------------------------------------------------------------*/ + +// Taken from Fusion +void xa_nn_elm_atan2_f32( FLOAT32 * z, const FLOAT32 * y, const FLOAT32 * x, WORD32 N ) +{ + /* + * const union ufloat32uint32* p; + * int sx,sy,big; + * sx=takesignf(x); + * sy=takesignf(y); + * x=fabs(x); + * y=fabs(y); + * if(x==0.f && y==0.f) + * { + * // The actual result depends on input signs. + * x = 1.f; + * y = 0.f; + * } + * + * big=x>y; + * if(big) + * { + * x=y/x; + * } + * else + * { + * // compare x==y is necessary to support (+/-Inf, +/-Inf) cases + * x = (x == y) ? 1.0f : x / y; + * } + * p = (x<0.5f) ? atanftbl1 : atanftbl2; + * // approximate atan(x)/x-1 + * y = p[0].f; + * y = x*y + p[1].f; + * y = x*y + p[2].f; + * y = x*y + p[3].f; + * y = x*y + p[4].f; + * y = x*y + p[5].f; + * y = x*y + p[6].f; + * y = x*y + p[7].f; + * // convert result to true atan(x) + * y = x*y + x; + * + * if (!big) y = pi2f.f - y; + * if (sx) y = pif.f - y; + * if (sy) y = -y; + * return y; + */ + const xtfloat * restrict X; + const xtfloat * restrict Y; + int32_t * restrict Z; + const xtfloat * restrict S_rd; + xtfloat * restrict S_wr; + const xtfloat * restrict POLY_TBL1; + const xtfloat * restrict POLY_TBL2; + + /* Current block index; overall number of blocks; number of values in the current block */ + int blkIx, blkNum, blkLen; + /* Block size, blkLen <= blkSize */ + const int blkSize = MAX_ALLOCA_SZ / sz_f32; + /* Allocate a fixed-size scratch area on the stack. */ + float32_t ALIGN(8) scr[blkSize]; + + int n; + + if (N <= 0) return; + + NASSERT_ALIGN8(scr); + + /* + * Data are processed in blocks of scratch area size. Further, the algorithm + * implementation is splitted in order to feed the optimizing compiler with a + * few loops of managable size. + */ + + blkNum = (N + blkSize - 1) / blkSize; + POLY_TBL1 = (xtfloat*)xa_nnlib_atanftbl1; + POLY_TBL2 = (xtfloat*)xa_nnlib_atanftbl2; + for (blkIx = 0; blkIxy0 ) p0 = y0/x0; + * // Special case of x==y is necessary to support (+/-Inf, +/-Inf) cases. + * else p0 = ( x0==y0 ? 
1.f : x0/y0 ); + * + * scr[n] = p0; + * } + * } + */ + + { + /* Input values */ + xtfloat x0, y0, i0; + /* Numerator; denominator; reciprocal; quotient */ + xtfloat num, den, rcp, quo; + /* Auxiliary vars */ + xtfloat s, eps; + /* Is NaN; Inf/Inf; x/Inf; 0/0; x and y are subnormal */ + xtbool b_nan, b_num_inf, b_den_inf, b_eqz, b_subn; + const xtfloat * pT; + + X = (xtfloat*)((uintptr_t)x + blkIx*blkSize*sz_f32); + Y = (xtfloat*)((uintptr_t)y + blkIx*blkSize*sz_f32); + S_wr = (xtfloat*)scr; + + static const uint32_t TAB[4] = { 0x7fc00000, 0x00800000, + 0x4b000000, 0x7f800000 + }; + pT = (xtfloat *)TAB; + __Pragma("loop_count min=1"); + for (n = 0; n 0 or x/Inf -> 0*/ + XT_MOVT_S(quo, XT_CONST_S(1), b_num_inf); /* Inf/Inf -> 1 */ + + XT_SSIP(quo, S_wr, sz_f32); + } + } + __Pragma("no_reorder"); + + /* + * Part II, polynomial approximation and full quadrant restoration. + * Reference C code: + * + * { + * const union ufloat32uint32 * ptbl; + * float32_t x0, y0, z0, p0; + * int sx, sy; + * + * for ( n=0; n>1;i++) + { + XT_LSX2IP(x1, inp, 2*sizeof(FLOAT32)); + XT_LSX2IP(d_min, min, 2*sizeof(FLOAT32)); + XT_LSX2IP(d_max, max, 2*sizeof(FLOAT32)); + + y = XT_MAX_SX2(x1, d_min); + y = XT_MIN_SX2(y, d_max); + + XT_SSX2IP( y, out, 2*sizeof(FLOAT32)); + } + } + else + { + ae_valign inp_a, min_a, max_a, out_a; + + inp_a = XT_LASX2PP(inp); + min_a = XT_LASX2PP(min); + max_a = XT_LASX2PP(max); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp_a, inp); + XT_LASX2IP(d_min, min_a, min); + XT_LASX2IP(d_max, max_a, max); + + y = XT_MAX_SX2(x1, d_min); + y = XT_MIN_SX2(y, d_max); + + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + } + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a3, a; + XT_LSIP(a1, (xtfloat *)inp, 0); + XT_LSIP(a2, (xtfloat *)min, 0); + XT_LSIP(a3, (xtfloat *)max, 0); + a = XT_MAX_S(a1, a2); + a = XT_MIN_S(a, a3); + XT_SSI(a, (xtfloat *)out, 0); + } + return 0; +} + +static void internal_elm_clamp_broadcast_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_min, + const FLOAT32 * __restrict__ p_max, + const FLOAT32 * __restrict__ p_inp, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_min; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_max; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + xtfloatx2 *__restrict__ input = (xtfloatx2 *)p_inp; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out, in0; + xtfloatx2 d_inp, x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + +/* Min pointer is pointing to actual max and max to min */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(i=0; i> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out, in0; + xtfloatx2 d_inp, x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + x1 = XT_LSI((xtfloat *)p_a, 0); + + if(((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(i=0; i> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 d_inp, x1, x2, y; + xtfloat in0, a0, b0, c0; + unsigned char con1, con2; + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_min[i * in_lc]; + p_b = (xtfloatx2 *)p_max; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + input = (xtfloatx2 *)&p_inp[i * in_lc]; + 
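+      /* With sign_flag set, the caller has swapped the min/max pointers, so
+       * p_min (loaded into x1) carries the true upper bound and p_max (x2) the
+       * true lower bound; the clamp below is therefore max(inp, x2) followed by
+       * min(..., x1). The aligned XT_LSX2IP path is used only when all four
+       * pointers are 8-byte aligned, otherwise the ae_valign path is taken. */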
if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + XT_LSX2IP(d_inp, input, 2 * sizeof(FLOAT32)); + y = XT_MAX_SX2(d_inp, x2); + y = XT_MIN_SX2(y, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp, vmin, vmax, out_a = AE_ZALIGN64(); + vmin = XT_LASX2PP(p_a); + vmax = XT_LASX2PP(p_b); + vinp = XT_LASX2PP(input); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vmin, p_a); + XT_LASX2IP(x2, vmax, p_b); + XT_LASX2IP(d_inp, vinp, input); + y = XT_MAX_SX2(d_inp, x2); + y = XT_MIN_SX2(y, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + XT_LSIP(in0, (xtfloat *)input, 0); + c0 = XT_MAX_S(in0, b0); + c0 = XT_MIN_S(a0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_min[i * in_lc]; + p_b = (xtfloatx2 *)p_max; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + input = (xtfloatx2 *)&p_inp[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + XT_LSX2IP(d_inp, input, 2 * sizeof(FLOAT32)); + y = XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp, vmin, vmax, out_a = AE_ZALIGN64(); + vmin = XT_LASX2PP(p_a); + vmax = XT_LASX2PP(p_b); + vinp = XT_LASX2PP(input); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vmin, p_a); + XT_LASX2IP(x2, vmax, p_b); + XT_LASX2IP(d_inp, vinp, input); + y = XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + XT_LSIP(in0, (xtfloat *)input, 0); + c0 = XT_MAX_S(in0, a0); + c0 = XT_MIN_S(c0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_clamp_broadcast_both_2D_f32xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_min, + const FLOAT32 * __restrict__ p_max, + const FLOAT32 * __restrict__ p_inp, + WORD32 out_lc, + WORD32 in_lc) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_min; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_max; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + xtfloatx2 *__restrict__ input = (xtfloatx2 *)p_inp; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 d_inp, x1, x2, y; + xtfloat in0, a0, b0, c0; + unsigned char con1, con2; + + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)p_min; + p_b = (xtfloatx2 *)p_max; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + input = (xtfloatx2 *)&p_inp[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0) && ((((unsigned)input)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + XT_LSX2IP(d_inp, input, 2 * sizeof(FLOAT32)); + y = 
XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp, vmin, vmax, out_a = AE_ZALIGN64(); + vmin = XT_LASX2PP(p_a); + vmax = XT_LASX2PP(p_b); + vinp = XT_LASX2PP(input); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vmin, p_a); + XT_LASX2IP(x2, vmax, p_b); + XT_LASX2IP(d_inp, vinp, input); + y = XT_MAX_SX2(d_inp, x1); + y = XT_MIN_SX2(y, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + XT_LSIP(in0, (xtfloat *)input, 0); + c0 = XT_MAX_S(in0, a0); + c0 = XT_MIN_S(c0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } +} + +WORD32 xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32(FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp, + const WORD32 *const p_inp_shape, + const FLOAT32 * __restrict__ p_min, + const WORD32 *const p_min_shape, + const FLOAT32 * __restrict__ p_max, + const WORD32 *const p_max_shape + ) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp, -1); + XA_NNLIB_ARG_CHK_PTR(p_min, -1); + XA_NNLIB_ARG_CHK_PTR(p_max, -1); + XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_min_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_max_shape, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_min, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_max, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_min_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_max_shape, sizeof(WORD32), -1); + /* Check shapes */ + int i; + xtbool sign_flag; + for(i = 0; i < 4; i++) + { + if((p_min_shape[i] != p_max_shape[i]) && ((p_min_shape[i] != 1) && (p_max_shape[i] != 1))) + { + return -1; + } + } + const float *p_min_new = p_min; + for(i = 0; i < 4; i++) + { + for(int j=0; j < p_min_shape[i]; j++) + { + p_min_new++; + } + } + const FLOAT32 *p_max_new = p_max; + for(i = 0; i < 4; i++) + { + for(int j=0; j < p_max_shape[i]; j++) + { + p_max_new++; + } + } + const FLOAT32 *p_inp_new = p_inp; + for(i = 0; i < 4; i++) + { + for(int j=0; j < p_inp_shape[i]; j++) + { + p_inp_new++; + } + } + WORD32 min_strides[4], max_strides[4]; + min_strides[3] = 1; + max_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(min_strides[i + 1], max_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_min_shape[i + 1], p_max_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + min_strides[i] = AE_MOVAD32_H(d_str); + max_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int min_const = 1, max_const = 1; + for(i = 0; i < 4; i++) + { + if(p_min_shape[i] == 1) + { + min_strides[i] = 0; + need_broadcast = 1; + } + else + { + min_const &= 0; + } + if(p_max_shape[i] == 1) + { + max_strides[i] = 0; + need_broadcast = 1; + } + else + { + max_const &= 0; + } + } + + int itr0, itr1, itr2; + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict p_inp_temp = p_inp; + const FLOAT32 *__restrict__ p_min_tmp = p_min; + const FLOAT32 *__restrict__ p_max_tmp = p_max; + + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_clamp_broadcast_2D_f32xf32xf32_f32( + p_out, + p_min, + p_max, + p_inp, + 1, + 
p_out_shape[0] * min_strides[0], + sign_flag); + } + else if((min_strides[3] == 1)&& (max_strides[3] == 1)) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if((min_strides[2] == 0) && (max_strides[2] == 0)) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_clamp_broadcast_both_2D_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp0, + p_max_tmp0, + p_inp_temp, + out_lc, + in_lc); + p_out_tmp += in_lc * out_lc; + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + p_inp_temp += in_lc * out_lc; + } + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + else + { + if(min_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = min_strides[0]; + tmp_strides[1] = min_strides[1]; + + min_strides[0] = max_strides[0]; + min_strides[1] = max_strides[1]; + + max_strides[0] = tmp_strides[0]; + max_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(max_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_clamp_broadcast_2D_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp0, + p_max_tmp0, + p_inp_temp, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + p_inp_temp += in_lc * out_lc; + } + + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + } + else if(min_const == 1 || max_const == 1) + { + if((min_const == 1)&&(max_const == 1)) + { + internal_elm_clamp_broadcast_both_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp, + p_max_tmp, + p_inp_temp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3]); + } + else + { + sign_flag = 0; + if(min_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + } + internal_elm_clamp_broadcast_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp, + p_max_tmp, + p_inp_temp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + } + else + { + sign_flag = 0; + if((min_strides[3] == 0) && (max_strides[3] == 0)) + { + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_min_tmp1 = p_min_tmp0; + const FLOAT32 *__restrict__ p_max_tmp1 = p_max_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_clamp_broadcast_both_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp1, + p_max_tmp1, + p_inp_temp, + p_out_shape[3]); + } + p_out_tmp += p_out_shape[3]; + p_min_tmp1 += min_strides[2]; + p_max_tmp1 += max_strides[2]; + p_inp_temp += p_out_shape[3]; + } + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + } + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + else + { + if(min_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_min_tmp; p_min_tmp = p_max_tmp; p_max_tmp = tmp; + 
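+      /* min is the operand broadcast along dimension 2 here, so swap the
+       * min/max pointers (their strides are swapped just below) and record the
+       * swap in sign_flag so the 2D kernel can restore the clamp order. */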
sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = min_strides[0]; + tmp_strides[1] = min_strides[1]; + tmp_strides[2] = min_strides[2]; + + min_strides[0] = max_strides[0]; + min_strides[1] = max_strides[1]; + min_strides[2] = max_strides[2]; + + max_strides[0] = tmp_strides[0]; + max_strides[1] = tmp_strides[1]; + max_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_min_tmp0 = p_min_tmp; + const FLOAT32 *__restrict__ p_max_tmp0 = p_max_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_min_tmp1 = p_min_tmp0; + const FLOAT32 *__restrict__ p_max_tmp1 = p_max_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_clamp_broadcast_f32xf32xf32_f32( + p_out_tmp, + p_min_tmp1, + p_max_tmp1, + p_inp_temp, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_min_tmp1 += min_strides[2]; + p_max_tmp1 += max_strides[2]; + p_inp_temp += p_out_shape[3]; + } + p_min_tmp0 += min_strides[1]; + p_max_tmp0 += max_strides[1]; + } + p_min_tmp += min_strides[0]; + p_max_tmp += max_strides[0]; + } + } + } + return 0; +} +#endif diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c new file mode 100644 index 0000000000..3b40752211 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c @@ -0,0 +1,525 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_remainder_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_remainder_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + y = XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_DIV_S(a1, a2); + a = FIFLOOR_S(a); + a = XT_MUL_S(a, a2); + a = XT_SUB_S(a1, a); + XT_SSI(a, (xtfloat *)out, 0); + } + + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_remainder_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * 
__restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + y = XT_MUL_SX2(y, x1); + y = XT_SUB_SX2(x2, y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + y = XT_MUL_SX2(y, x1); + y = XT_SUB_SX2(x2, y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + c0 = FIFLOOR_S(c0); + c0 = XT_MUL_S(c0, a0); + c0 = XT_SUB_S(b0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + y = XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + y = XT_MUL_SX2(y, x2); + y = XT_SUB_SX2(x1, y); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + c0 = FIFLOOR_S(c0); + c0 = XT_MUL_S(c0, b0); + c0 = XT_SUB_S(a0, c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_remainder_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + 
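+  /* Note: the "inp2 - inp1" wording above appears to be carried over from the
+   * element-wise subtract kernel this file mirrors; as in the 2D variant, the
+   * sign_flag path actually computes the floor-based remainder with the
+   * operands swapped back, i.e. out = inp2 - floor(inp2 / inp1) * inp1. */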
if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_remainder_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_remainder_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_remainder_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < 
p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_remainder_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif + diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c new file mode 100644 index 0000000000..e7b80e3a1d --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c @@ -0,0 +1,260 @@ +#include "xa_nnlib_common.h" +#include "stdio.h" +/* + * Currently only supports upto 5D input tensors. + * 1/2/3/4 D input tensors will be scaled up to 5D. + * For example, 2x3 -> 1x1x1x2x3. + */ + +WORD32 xa_nn_transpose_32_32(WORD32 * __restrict__ p_out + ,const WORD32 *const p_out_shape + ,const WORD32 * __restrict__ p_inp + ,const WORD32 *const p_inp_shape + ,const WORD32 * __restrict__ p_permute_vec + ,WORD32 num_out_dims + ,WORD32 num_inp_dims) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp, -1); + XA_NNLIB_ARG_CHK_PTR(p_permute_vec, -1); + XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp_shape, -1); + + /* Invalid input checks */ + XA_NNLIB_ARG_CHK_COND(((num_inp_dims <= 0) || (num_inp_dims > 5)), -1); + XA_NNLIB_ARG_CHK_COND((num_out_dims != num_inp_dims), -1); + + int itr = 0; + for(itr=0; itr < num_inp_dims; itr++) + { + XA_NNLIB_ARG_CHK_COND((p_inp_shape[itr] <= 0), -1); + } + for(itr=0; itr < num_out_dims; itr++) + { + XA_NNLIB_ARG_CHK_COND((p_out_shape[itr] <= 0), -1); + } + + + /* Output shape provided must be correct based on input + * shape and permute values */ + for(itr=0; itr < num_out_dims; itr++) + { + int output_dim = p_out_shape[itr]; + int expected_dim = p_inp_shape[p_permute_vec[itr]]; + XA_NNLIB_ARG_CHK_COND((output_dim != expected_dim), -1); + } + + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_permute_vec, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp_shape, sizeof(WORD32), -1); + + /* Shift all dim with 1 in the outer part */ + int eff_output_shape[5]; + int eff_permute_vec[5]; + + for(int i = 0; i < num_out_dims; i++) + { + eff_output_shape[i] = p_out_shape[i]; + eff_permute_vec[i] = p_permute_vec[i]; + } + + int one_i=num_out_dims-1, non_one_i=num_out_dims-1; + while(one_i > 0 && non_one_i >=0){ + while(one_i > 0 && eff_output_shape[one_i]!=1){ + one_i--; + } + non_one_i = one_i; + while(non_one_i >= 0 && eff_output_shape[non_one_i]==1) + { + non_one_i--; + } + if(one_i > 0 && non_one_i >=0){ + int temp; + /*swap output_shape*/ + { + temp = eff_output_shape[one_i]; + eff_output_shape[one_i] = eff_output_shape[non_one_i]; + eff_output_shape[non_one_i] = temp; + } + /*swap permute_vec*/ + { + temp = eff_permute_vec[one_i]; + eff_permute_vec[one_i] = eff_permute_vec[non_one_i]; + eff_permute_vec[non_one_i] = temp; + } + + } + } + + /* Promoting lesser dim tensors to 5D tensors. 
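+ * For example (tracing the promotion below, with all dims greater than 1), an
+ * AxBxC (3D) input is treated as 1x1xAxBxC and a permute_vec of {2, 0, 1}
+ * maps to {0, 1, 4, 2, 3}.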
+ * Also updating the permute_vec and shapes as needed for optimization */ + int p_5D_inp_shape[5] = {1, 1, 1, 1, 1}; + int p_5D_out_shape[5] = {1, 1, 1, 1, 1}; + int p_5D_permute_vec[5] = {0, 1, 2, 3, 4}; + + /* Check if any inner inp dimension is same in the output */ + int last_dim_same = 1, last_n_same_dim = 0; + itr = num_inp_dims - 1; + while(itr >= 0) + { + last_n_same_dim = (last_dim_same && (eff_permute_vec[itr] == itr)) ? (last_n_same_dim + 1) : last_n_same_dim; + last_dim_same = (eff_permute_vec[itr] == itr) ? last_dim_same & 1 : last_dim_same & 0; + itr--; + } + + int dims_added = 5 - num_inp_dims; + itr = num_inp_dims - 1; + int same_count = last_n_same_dim; + int count = 4; + while(itr >= 0) + { + p_5D_inp_shape[count] = (same_count > 0) ? p_5D_inp_shape[count]*p_inp_shape[itr] : p_inp_shape[itr]; + p_5D_out_shape[count] = (same_count > 0) ? p_5D_out_shape[count]*eff_output_shape[itr] : eff_output_shape[itr]; + same_count--; + itr--; + count = (same_count > 0) ? count : count - 1; + } + + itr = num_inp_dims - 1; + same_count = (last_n_same_dim) ? num_inp_dims - (last_n_same_dim - 1) : 0; + count = 4; + while(itr >= 0) + { + p_5D_permute_vec[count] = (same_count > 0) ? eff_permute_vec[itr-(last_n_same_dim - 1)] + dims_added + last_n_same_dim - 1 : eff_permute_vec[itr] + dims_added; + same_count--; + itr--; + count--; + } + + int out_dim0, out_dim1, out_dim2, out_dim3, out_dim4; + int inp_dim1, inp_dim2, inp_dim3, inp_dim4; + int inp_stride[5]; + + out_dim0 = p_5D_out_shape[0]; + out_dim1 = p_5D_out_shape[1]; + out_dim2 = p_5D_out_shape[2]; + out_dim3 = p_5D_out_shape[3]; + out_dim4 = p_5D_out_shape[4]; + + inp_dim1 = p_5D_inp_shape[1]; + inp_dim2 = p_5D_inp_shape[2]; + inp_dim3 = p_5D_inp_shape[3]; + inp_dim4 = p_5D_inp_shape[4]; + + inp_stride[0] = inp_dim1*inp_dim2*inp_dim3*inp_dim4; + inp_stride[1] = inp_dim2*inp_dim3*inp_dim4; + inp_stride[2] = inp_dim3*inp_dim4; + inp_stride[3] = inp_dim4; + inp_stride[4] = 1; + + if(last_n_same_dim) + { + int itr0, itr1, itr2, itr3, itr4; + WORD32 *p_inp0 = (WORD32 *)p_inp; + for(itr0 = 0; itr0 < out_dim0; itr0++) + { + WORD32 *p_inp1 = p_inp0+(itr0*inp_stride[p_5D_permute_vec[0]]); +#pragma loop_count min=1 + for(itr1 = 0; itr1 < out_dim1; itr1++) + { + WORD32 *p_inp2 = p_inp1+(itr1*inp_stride[p_5D_permute_vec[1]]); +#pragma loop_count min=1 + for(itr2 = 0; itr2 < out_dim2; itr2++) + { + WORD32 *p_inp3 = p_inp2+(itr2*inp_stride[p_5D_permute_vec[2]]); +#pragma loop_count min=1 + for(itr3 = 0; itr3 < out_dim3; itr3++, p_out+=out_dim4) + { + WORD32 *p_inp4 = p_inp3+(itr3*inp_stride[p_5D_permute_vec[3]]); + if((((unsigned)p_inp4 & 1) == 0) && (((unsigned)p_out & 1) == 0)) + { + ae_int32x2 *__restrict__ pae_i = (ae_int32x2 *)(p_inp4); + ae_int32x2 *__restrict__ pae_o = (ae_int32x2 *)(p_out); + ae_int32x2 d0; + for(itr4 = 0; itr4 < (out_dim4 >> 1); itr4++) + { + AE_L32X2_IP(d0, pae_i, 2 * sizeof(WORD32)); + AE_S32X2_IP(d0, pae_o, 2 * sizeof(WORD32)); + } + ae_int32 *__restrict__ puae_i = (ae_int32 *)(pae_i); + ae_int32 *__restrict__ puae_o = (ae_int32 *)(pae_o); +#pragma loop_count max=3 + for(itr4 = 0; itr4 < (out_dim4 & 1); itr4++) + { + puae_o[itr4] = puae_i[itr4]; + } + } + else + { + ae_int32x2 *__restrict__ pae_i = (ae_int32x2 *)(p_inp4); + ae_int32x2 *__restrict__ pae_o = (ae_int32x2 *)(p_out); + ae_valign a_inp = AE_LA64_PP(pae_i); + ae_valign a_out = AE_ZALIGN64(); + ae_int32x2 d0; + for(itr4 = 0; itr4 < (out_dim4 >> 1); itr4++) + { + AE_LA32X2_IP(d0, a_inp, pae_i); + AE_SA32X2_IP(d0, a_out, pae_o); + } + AE_SA64POS_FP(a_out, pae_o); 
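+              /* The trailing element (present when out_dim4 is odd) is copied
+               * with scalar accesses after the unaligned store buffer has been
+               * flushed above. */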
+ ae_int32 *__restrict__ puae_i = (ae_int32 *)(pae_i); + ae_int32 *__restrict__ puae_o = (ae_int32 *)(pae_o); +#pragma loop_count max=3 + for(itr4 = 0; itr4 < (out_dim4 & 1); itr4++) + { + puae_o[itr4] = puae_i[itr4]; + } + } + } + } + } + } + } + else + { + int itr0, itr1, itr2, itr3, itr4; + WORD32 *p_inp0 = (WORD32 *)p_inp; + for(itr0 = 0; itr0 < out_dim0; itr0++) + { + WORD32 *p_inp1 = p_inp0+(itr0*inp_stride[p_5D_permute_vec[0]]); + for(itr1 = 0; itr1 < out_dim1; itr1++) + { + WORD32 *p_inp2 = p_inp1+(itr1*inp_stride[p_5D_permute_vec[1]]); + for(itr2 = 0; itr2 < out_dim2; itr2++) + { + WORD32 *p_inp3 = p_inp2+(itr2*inp_stride[p_5D_permute_vec[2]]); + for(itr3 = 0; itr3 < out_dim3; itr3++) + { + WORD32 *p_inp4 = p_inp3+(itr3*inp_stride[p_5D_permute_vec[3]]); + + ae_valign a_out = AE_ZALIGN64(); + for(itr4 = 0; itr4 < (out_dim4 >> 1); itr4++) + { + ae_int32x2 d0, d1; + ae_int32x2 tmp0; + + AE_L32_XP(d0, (ae_int32 *)p_inp4, inp_stride[p_5D_permute_vec[4]] << 2); + AE_L32_XP(d1, (ae_int32 *)p_inp4, inp_stride[p_5D_permute_vec[4]] << 2); + + tmp0 = AE_SEL32_HH(d0, d1); + + AE_SA32X2_IP(tmp0, a_out, (ae_int32x2 *)p_out); + } + AE_SA64POS_FP(a_out, p_out); +#pragma loop_count max=3 + for(itr4 = 0; itr4 < (out_dim4 & 1); itr4++) + { + *p_out++ = *p_inp4; + } + } + } + } + } + } + + return 0; +} \ No newline at end of file diff --git a/examples/portable/executor_runner/executor_runner.cpp b/examples/portable/executor_runner/executor_runner.cpp index 93c150c0b9..514a82c0ff 100644 --- a/examples/portable/executor_runner/executor_runner.cpp +++ b/examples/portable/executor_runner/executor_runner.cpp @@ -32,6 +32,8 @@ static uint8_t method_allocator_pool[4 * 1024U * 1024U]; // 4 MB +static uint8_t temp_allocator_pool[1024U * 1024U]; + DEFINE_string( model_path, "model.pte", @@ -120,6 +122,10 @@ int main(int argc, char** argv) { MemoryAllocator method_allocator{ MemoryAllocator(sizeof(method_allocator_pool), method_allocator_pool)}; + // Temporary memory required by kernels + MemoryAllocator temp_allocator{ + MemoryAllocator(sizeof(temp_allocator_pool), temp_allocator_pool)}; + // The memory-planned buffers will back the mutable tensors used by the // method. The sizes of these buffers were determined ahead of time during the // memory-planning pasees. @@ -144,7 +150,8 @@ int main(int argc, char** argv) { // Assemble all of the allocators into the MemoryManager that the Executor // will use. - MemoryManager memory_manager(&method_allocator, &planned_memory); + MemoryManager memory_manager( + &method_allocator, &planned_memory, &temp_allocator); // // Load the method from the program, using the provided allocators. Running @@ -172,6 +179,7 @@ int main(int argc, char** argv) { // Run the model. Error status = method->execute(); + ET_CHECK_MSG( status == Error::Ok, "Execution of method %s failed with status 0x%" PRIx32,