From 216389c8e32010b15895b4def1a76c3eae209c04 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Wed, 23 Oct 2024 06:51:40 -0700 Subject: [PATCH] Adding mean and where ops optimized on HiFi --- backends/cadence/aot/functions_hifi.yaml | 7 +- backends/cadence/hifi/kernels/CMakeLists.txt | 2 + backends/cadence/hifi/kernels/kernels.h | 28 + .../cadence/hifi/operators/CMakeLists.txt | 12 +- backends/cadence/hifi/operators/op_mean.cpp | 170 ++++ backends/cadence/hifi/operators/op_where.cpp | 176 ++++ .../nnlib/xa_nn_elm_where_f32xf32_f32.c | 838 ++++++++++++++++++ .../third-party/nnlib/xa_nn_reduce_32_32.c | 647 ++++++++++++++ 8 files changed, 1870 insertions(+), 10 deletions(-) create mode 100644 backends/cadence/hifi/operators/op_mean.cpp create mode 100644 backends/cadence/hifi/operators/op_where.cpp create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 70b2dd0207..8cf0c2de0a 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -62,6 +62,11 @@ - arg_meta: null kernel_name: torch::executor::full_out +- op: mean.out + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::mean_dim_out + - op: mul.out kernels: - arg_meta: null @@ -105,7 +110,7 @@ - op: where.self_out kernels: - arg_meta: null - kernel_name: torch::executor::where_out + kernel_name: cadence::impl::HiFi::where_out # custom ops - func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 8fee7e8536..9321cc544e 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -13,6 +13,8 @@ add_library( ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c ) # Let files say "include ". set(_common_include_directories ${EXECUTORCH_ROOT}/..) 
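For context before the header diff that follows: the kernels.h hunk declares the new NNLIB entry points this patch wires up. A minimal usage sketch of the flat (non-broadcast) where kernel is shown below; the wrapper function and buffer names are illustrative only (not part of the patch), and the include path simply follows the repo's include <executorch/...> convention noted in the CMake comment above:

    // Illustrative sketch (not part of the patch): select between two contiguous
    // float buffers with a byte condition mask, i.e. out[i] = cond[i] ? a[i] : b[i].
    #include <executorch/backends/cadence/hifi/kernels/kernels.h>

    static void where_flat_example(
        FLOAT32* out,
        const FLOAT32* a,
        const FLOAT32* b,
        const unsigned char* cond, // one byte per element; non-zero selects a[i]
        WORD32 num_elm) {
      // The kernel returns a negative value on invalid arguments
      // (NULL or misaligned pointers, num_elm <= 0).
      WORD32 status = xa_nn_elm_where_f32xf32_f32(out, a, b, cond, num_elm);
      (void)status;
    }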
diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index a206635a28..2087c9761b 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -55,6 +55,34 @@ extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32( const FLOAT32* __restrict__ p_inp2, const WORD32* const p_inp2_shape); +extern "C" WORD32 xa_nn_elm_where_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp1, + const FLOAT32* __restrict__ p_inp2, + const unsigned char* __restrict__ p_condition, + WORD32 num_elm); + +extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape, + const unsigned char* __restrict__ p_condition, + const WORD32* const p_condition_shape); + +extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp, + const WORD32* const p_inp_shape, + const WORD32* __restrict__ p_axis, + WORD32 num_out_dims, + WORD32 num_inp_dims, + WORD32 num_axis_dims, + void* __restrict__ p_scratch_in); + namespace cadence { namespace impl { namespace HiFi { diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index cbbb279e5d..dbe5867550 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -22,19 +22,12 @@ endif() set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/index_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/kernel_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/slice_util.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_where.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" @@ -57,6 +50,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/slice_util.cpp" ) add_library(aten_ops_cadence ${_aten_ops__srcs}) target_link_libraries(aten_ops_cadence PUBLIC executorch) diff --git a/backends/cadence/hifi/operators/op_mean.cpp b/backends/cadence/hifi/operators/op_mean.cpp new file mode 100644 index 0000000000..478e10da71 --- /dev/null +++ 
b/backends/cadence/hifi/operators/op_mean.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
+using executorch::aten::RuntimeContext;
+using executorch::runtime::ArrayRef;
+using torch::executor::Error;
+using torch::executor::optional;
+
+namespace cadence {
+namespace impl {
+namespace HiFi {
+namespace native {
+
+int prepare_data(
+    const Tensor& in,
+    Tensor& out,
+    optional<ArrayRef<int64_t>> dim_list,
+    int* inp_shape,
+    int* out_shape,
+    int* p_axis,
+    int num_inp_dims,
+    int num_out_dims) {
+  for (int i = 0; i < num_inp_dims; i++) {
+    inp_shape[i] = in.size(i);
+  }
+
+  for (int i = 0; i < num_out_dims; i++) {
+    out_shape[i] = out.size(i);
+  }
+
+  int num_axis_dims = 0;
+  for (const auto& d : dim_list.value()) {
+    if (d < 0) {
+      p_axis[num_axis_dims] = num_inp_dims + d;
+      num_axis_dims++;
+    } else {
+      p_axis[num_axis_dims] = d;
+      num_axis_dims++;
+    }
+  }
+
+  return num_axis_dims;
+}
+
+Tensor& mean_dim_out(
+    RuntimeContext& ctx,
+    const Tensor& in,
+    optional<ArrayRef<int64_t>> dim_list,
+    bool keepdim,
+    optional<ScalarType> dtype,
+    Tensor& out) {
+  ET_KERNEL_CHECK(
+      ctx,
+      torch::executor::check_mean_dim_args(in, dim_list, keepdim, dtype, out),
+      InvalidArgument,
+      out);
+
+  ET_KERNEL_CHECK(
+      ctx,
+      torch::executor::resize_reduction_out(in, dim_list, keepdim, out) ==
+          Error::Ok,
+      InvalidArgument,
+      out);
+
+  constexpr auto name = "mean.out";
+  constexpr int kNnlibMaxDim = 4;
+
+  bool optimized = 1;
+
+  if (out.scalar_type() != ScalarType::Float)
+    optimized = 0;
+
+  if (in.dim() > kNnlibMaxDim)
+    optimized = 0;
+
+  if (optimized) {
+    float* __restrict__ p_out = out.mutable_data_ptr<float>();
+    const float* __restrict__ p_inp =
+        (const float* __restrict__)in.const_data_ptr<float>();
+
+    int num_elm = in.numel();
+
+    int num_inp_dims = in.dim();
+    int num_out_dims = out.dim();
+
+    int inp_shape[kNnlibMaxDim];
+    int out_shape[kNnlibMaxDim];
+    int p_axis[kNnlibMaxDim];
+
+    for (int i = 0; i < kNnlibMaxDim; i++) {
+      out_shape[i] = 1;
+      inp_shape[i] = 1;
+      p_axis[i] = 1;
+    }
+
+    int num_axis_dims = prepare_data(
+        in,
+        out,
+        dim_list,
+        inp_shape,
+        out_shape,
+        p_axis,
+        num_inp_dims,
+        num_out_dims);
+
+    if (num_axis_dims == num_inp_dims) {
+      num_out_dims = 1;
+      out_shape[0] = 1;
+    }
+
+    int scratch_size = xa_nn_reduce_getsize_nhwc(
+        -3, inp_shape, num_inp_dims, p_axis, num_axis_dims, 1);
+
+    void* __restrict__ p_scratch_in = (void* __restrict__)malloc(scratch_size);
+
+    xa_nn_reduce_mean_4D_f32_f32(
+        p_out,
+        out_shape,
+        p_inp,
+        inp_shape,
+        p_axis,
+        num_out_dims,
+        num_inp_dims,
+        num_axis_dims,
+        p_scratch_in);
+
+    return out;
+  }
+
+  ET_SWITCH_REALHB_TYPES(in.scalar_type(), ctx, name, CTYPE_IN, [&] {
+    ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, name, CTYPE_OUT, [&] {
+      CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
+      const size_t num = torch::executor::get_reduced_dim_product(in, dim_list);
+
+      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+        CTYPE_OUT sum = 0;
+        if (in.numel() > 0) {
+          sum = torch::executor::map_reduce_over_dim_list<CTYPE_IN, CTYPE_OUT>(
+              [](CTYPE_IN v) { return static_cast<CTYPE_OUT>(v); },
+              [](CTYPE_OUT outv, CTYPE_OUT acc) { return acc + outv; },
+              in,
+              dim_list,
+              out_ix);
+        }
+        out_data[out_ix] = sum / static_cast<float>(num);
+      }
+    });
+  });
+
+  return out;
+}
+
+} // namespace native
+} // namespace HiFi
+} // namespace impl
+} // namespace cadence
diff --git a/backends/cadence/hifi/operators/op_where.cpp b/backends/cadence/hifi/operators/op_where.cpp
new file mode 100644
index 0000000000..06bd0bc3c9
--- /dev/null
+++ b/backends/cadence/hifi/operators/op_where.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include 
+#include 
+#include 
+#include 
+
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
+using executorch::aten::RuntimeContext;
+using torch::executor::Error;
+
+namespace cadence {
+namespace impl {
+namespace HiFi {
+namespace native {
+
+Tensor& where_out(
+    RuntimeContext& ctx,
+    const Tensor& cond,
+    const Tensor& a,
+    const Tensor& b,
+    Tensor& out) {
+  ScalarType cond_type = cond.scalar_type();
+  ScalarType a_type = a.scalar_type();
+  ScalarType b_type = b.scalar_type();
+  ScalarType common_type = executorch::runtime::promoteTypes(a_type, b_type);
+  ScalarType out_type = out.scalar_type();
+
+  ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);
+
+  // Determine output size and resize for dynamic shapes
+  ET_KERNEL_CHECK(
+      ctx,
+      torch::executor::resize_to_broadcast_target_size(a, b, cond, out) ==
+          Error::Ok,
+      InvalidArgument,
+      out);
+
+  constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */
+  constexpr auto name = "where.self_out";
+
+  ET_CHECK_MSG(
+      cond_type == ScalarType::Bool || cond_type == ScalarType::Byte,
+      "Unhandled dtype %s for where.self_out",
+      torch::executor::toString(cond_type));
+
+  int a_dim = a.dim(), b_dim = b.dim(), con_dim = cond.dim(),
+      out_dim = out.dim();
+  bool optimized = 1;
+  /*find broadcast*/
+  const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
+  const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
+  const bool cond_is_broadcasted = !out.sizes().equals(cond.sizes());
+  const bool broadcast =
+      (a_is_broadcasted || b_is_broadcasted || cond_is_broadcasted);
+
+  int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
+  max_dim = cond.dim() > max_dim ? cond.dim() : max_dim;
+  max_dim = out.dim() > max_dim ? out.dim() : max_dim;
+
+  if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float))
+    optimized = 0;
+
+  if ((a_dim == 0) || (b_dim == 0) || (con_dim == 0))
+    optimized = 0;
+
+  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+    optimized = 0;
+
+  if (optimized) {
+    const float* a_data = a.const_data_ptr<float>();
+    const float* b_data = b.const_data_ptr<float>();
+    float* out_data = out.mutable_data_ptr<float>();
+    const unsigned char* con = cond.const_data_ptr<uint8_t>();
+
+    if (broadcast == 1) {
+      int out_shape[kNnlibMaxDim];
+      int inp1_shape[kNnlibMaxDim];
+      int inp2_shape[kNnlibMaxDim];
+      int con_shape[kNnlibMaxDim];
+
+      for (int i = 0; i < kNnlibMaxDim; i++) {
+        con_shape[i] = 1;
+        out_shape[i] = 1;
+        inp1_shape[i] = 1;
+        inp2_shape[i] = 1;
+      }
+
+      int off_o = kNnlibMaxDim - out.dim();
+      int off_a = kNnlibMaxDim - a.dim();
+      int off_b = kNnlibMaxDim - b.dim();
+      int off_c = kNnlibMaxDim - cond.dim();
+
+      for (int i = 0; i < out.dim(); i++)
+        out_shape[i + off_o] = out.size(i);
+      for (int i = 0; i < a.dim(); i++)
+        inp1_shape[i + off_a] = a.size(i);
+      for (int i = 0; i < b.dim(); i++)
+        inp2_shape[i + off_b] = b.size(i);
+      for (int i = 0; i < cond.dim(); i++)
+        con_shape[i + off_c] = cond.size(i);
+
+      if (con_shape[0] != out_shape[0] || con_shape[1] != out_shape[1] ||
+          con_shape[2] != out_shape[2] || con_shape[3] != out_shape[3]) {
+        void* p_scratch =
+            malloc(out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]);
+        const unsigned char* p_brd_cond = (const unsigned char*)p_scratch;
+        xa_nn_broadcast_8_8(
+            (WORD8* __restrict__)p_brd_cond,
+            out_shape,
+            (const WORD8* __restrict__)con,
+            con_shape,
+            4);
+
+        for (int i = 0; i < 4; i++) {
+          con_shape[i] = out_shape[i];
+        }
+        xa_nn_elm_where_broadcast_4D_f32xf32_f32(
+            out_data,
+            out_shape,
+            a_data,
+            inp1_shape,
+            b_data,
+            inp2_shape,
+            p_brd_cond,
+            con_shape);
+        free(p_scratch);
+      } else {
+        xa_nn_elm_where_broadcast_4D_f32xf32_f32(
+            out_data,
+            out_shape,
+            a_data,
+            inp1_shape,
+            b_data,
+            inp2_shape,
+            con,
+            con_shape);
+      }
+    } else {
+      xa_nn_elm_where_f32xf32_f32(out_data, a_data, b_data, con, out.numel());
+    }
+    return out;
+  }
+  ET_SWITCH_REALHB_TYPES(a_type, ctx, name, CTYPE_A, [&]() {
+    ET_SWITCH_REALHB_TYPES(b_type, ctx, name, CTYPE_B, [&]() {
+      using CTYPE_OUT =
+          typename torch::executor::promote_types<CTYPE_A, CTYPE_B>::type;
+      torch::executor::
+          apply_ternary_elementwise_fn<CTYPE_A, CTYPE_B, uint8_t, CTYPE_OUT>(
+              [](const CTYPE_A val_a,
+                 const CTYPE_B val_b,
+                 const uint8_t val_c) {
+                CTYPE_OUT a_casted = static_cast<CTYPE_OUT>(val_a);
+                CTYPE_OUT b_casted = static_cast<CTYPE_OUT>(val_b);
+                return val_c ? a_casted : b_casted;
+              },
+              a,
+              b,
+              cond,
+              out);
+    });
+  });
+  return out;
+}
+
+} // namespace native
+} // namespace HiFi
+} // namespace impl
+} // namespace cadence
diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
new file mode 100644
index 0000000000..6a7f6d0f77
--- /dev/null
+++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
@@ -0,0 +1,838 @@
+/*******************************************************************************
+* Copyright (c) 2018-2024 Cadence Design Systems, Inc.
+* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +******************************************************************************/ +#include "xa_type_def.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_common_fpu.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nn_common.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_err_chk.h" +#include "nnlib-hifi4/xa_nnlib/algo/kernels/basic/hifi4/xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + + +#if !HAVE_VFPU +DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_where_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + const unsigned char *__restrict__ condition, + WORD32 num_elm + ) + ) +#else +WORD32 xa_nn_elm_where_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + const unsigned char *__restrict__ p_condition, + WORD32 num_elm) +{ + + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + unsigned char *condition = p_condition; + xtfloatx2 x1, x2, y; + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + if(((((unsigned)p_out)&7) == 0) && ((((unsigned)p_inp1)&7) == 0) && ((((unsigned)p_inp2)&7) == 0)) + { + for(i=0;i < num_elm>>1;i++) + { + XT_LSX2IP(x1, inp1, 2*sizeof(FLOAT32)); + XT_LSX2IP(x2, inp2, 2*sizeof(FLOAT32)); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SSX2IP( y, out, 2*sizeof(FLOAT32)); + } + } + else + { + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SASX2IP(y, out_a, out); + } + XT_SASX2POSFP(out_a, out); + } + // Remainder 
Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + con1 = XT_L8UI(condition, 0); + xtbool s = AE_MOVBA(con1); + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + XT_MOVT_S(a, a1, s); + XT_MOVF_S(a, a2, s); + XT_SSI(a, (xtfloat *)out, 0); + } +} + +static void internal_elm_where_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + const unsigned char * __restrict__ p_condition, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + unsigned char *condition = p_condition; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + /* For out = condition ? inp2 :inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + x1 = XT_LSI((xtfloat *)p_a, 0); + + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + if((((unsigned)p_c)&7) == 0) + { + for(i=0; i> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + /* For out = condition ? inp2 :inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + condition = &p_condition[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x2, con); + XT_MOVF_SX2 (y, x1, con); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x2, con); + XT_MOVF_SX2 (y, x1, con); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + con1 = XT_L8UI(condition, 0); + xtbool s = AE_MOVBA(con1); + XT_MOVT_S(c0, b0, s); + XT_MOVF_S(c0, a0, s); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For out = condition ? 
inp1 :inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + condition = &p_condition[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + con1 = XT_L8UI(condition, 0); + xtbool s = AE_MOVBA(con1); + XT_MOVT_S(c0, a0, s); + XT_MOVF_S(c0, b0, s); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_where_broadcast_both_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + const unsigned char * __restrict__ p_condition, + WORD32 out_lc, + WORD32 in_lc) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + unsigned char *condition = p_condition; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + unsigned char con1, con2; + xtbool2 con = int32_rtor_xtbool2(0x00000003); + + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)p_inp1; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + condition = &p_condition[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + con1 = XT_L8UI(condition, 0); + condition++; + con2 = XT_L8UI(condition, 0); + condition++; + con = AE_MOVBA1X2(con1, con2); + XT_MOVT_SX2 (y, x1, con); + XT_MOVF_SX2 (y, x2, con); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, 0); + XT_LSIP(b0, (xtfloat *)p_b, 0); + con1 = XT_L8UI(condition, 0); + xtbool s = AE_MOVBA(con1); + XT_MOVT_S(c0, a0, s); + XT_MOVF_S(c0, b0, s); + XT_SSI(c0, (xtfloat *)p_c, 0); + 
} + } +} + +WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape, + const unsigned char *__restrict__ p_condition, + const WORD32 *const p_condition_shape + ) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + XA_NNLIB_ARG_CHK_PTR(p_condition, -1); + XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_condition_shape, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_condition, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_condition_shape, sizeof(WORD32), -1); + + /* Check shapes */ + int i; + xtbool sign_flag; + for(i = 0; i < 4; i++) + { + if((p_inp1_shape[i] != p_inp2_shape[i]) && ((p_inp1_shape[i] != 1) && (p_inp2_shape[i] != 1))) + { + return -1; + } + } + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] == 1) + { + inp1_strides[i] = 0; + need_broadcast = 1; + } + else + { + inp1_const &= 0; + } + if(p_inp2_shape[i] == 1) + { + inp2_strides[i] = 0; + need_broadcast = 1; + } + else + { + inp2_const &= 0; + } + } + + int itr0, itr1, itr2; + FLOAT32 *p_out_tmp = p_out; + const unsigned char *__restrict p_condition_temp = p_condition; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_where_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + p_condition, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if((inp1_strides[3] == 1)&& (inp2_strides[3] == 1)) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if((inp1_strides[2] == 0) && (inp2_strides[2] == 0)) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_where_broadcast_both_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + p_condition_temp, + out_lc, + in_lc); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + p_condition_temp += in_lc * out_lc; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else + { + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + 
sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_where_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + p_condition_temp, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + p_condition_temp += in_lc * out_lc; + } + + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + if((inp1_const == 1)&&(inp2_const == 1)) + { + internal_elm_where_broadcast_both_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_condition_temp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3]); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_where_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_condition_temp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + } + else + { + sign_flag = 0; + if((inp1_strides[3] == 0) && (inp2_strides[3] == 0)) + { + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_where_broadcast_both_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_condition_temp, + p_out_shape[3]); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + p_condition_temp += p_out_shape[3]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else + { + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_where_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_condition_temp, + p_out_shape[3], + sign_flag); + } + 
p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + p_condition_temp += p_out_shape[3]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + } + return 0; +} + +#endif \ No newline at end of file diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c new file mode 100644 index 0000000000..5978a92d26 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c @@ -0,0 +1,647 @@ +#include "xa_nnlib_common.h" +#include +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_common_macros.h" + +#define ALIGNMENT_8 8 + +#define ALIGN_PTR(x, bytes) ((((unsigned)(x))+(bytes-1))&(~(bytes-1))) + +static void vecmean16_inpx3(const xtfloatx2 *p_src1, const xtfloat* p_src2, const xtfloat* p_src3, xtfloatx2 *p_dst, int N){ + int i = 0; + ae_valign align_src1, align_dst; + ae_valign align_src2, align_src3; + align_src1 = AE_LA64_PP(p_src1); + align_src2 = AE_LA64_PP(p_src2); + align_src3 = AE_LA64_PP(p_src3); + align_dst = AE_ZALIGN64(); + + for(i=0; i < (N >> 2); i++) + { + xtfloatx2 j1_h, j1_l, j2_h, j2_l; + + xtfloatx2 wout1, wout2; + XT_LASX2IP(wout1, align_src1, p_src1); + XT_LASX2IP(wout2, align_src1, p_src1); + + XT_LASX2IP(j1_h, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j1_l, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j2_h, align_src3, (xtfloatx2 *)p_src3); + XT_LASX2IP(j2_l, align_src3, (xtfloatx2 *)p_src3); + + j1_h = XT_ADD_SX2(j1_h, j2_h); + j1_l = XT_ADD_SX2(j1_l, j2_l); + wout1 = XT_ADD_SX2(wout1, j1_h); + wout2 = XT_ADD_SX2(wout2, j1_l); + + XT_SASX2IP(wout1, align_dst, p_dst); + XT_SASX2IP(wout2, align_dst, p_dst); + } + AE_SA64POS_FP(align_dst, p_dst); // finalize the stream + + //Remainder Loop + for(i=0; i < (N & 3); i++) + { + xtfloat j1, j2; + xtfloat wout1; + XT_LSXP(wout1, (xtfloat *)p_src1, sizeof(xtfloat)); + j1 = (xtfloat) *(p_src2 + i); + j2 = (xtfloat) *(p_src3 + i); + + j1 = XT_ADD_S(j1, j2); + wout1 = XT_ADD_S(wout1, j1); + XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat)); + } +} + +static void vecmean16_inpx2(const xtfloatx2 *p_src1, const xtfloat* p_src2, xtfloatx2 *p_dst, int N){ + ae_valign align_src1, align_dst; + ae_valign align_src2; + align_src1 = AE_LA64_PP(p_src1); + align_src2 = AE_LA64_PP(p_src2); + align_dst = AE_ZALIGN64(); + + int i = 0; + for(i=0; i < (N >> 2); i++) + { + xtfloatx2 j1, j2; + xtfloatx2 wout1, wout2; + XT_LASX2IP(wout1, align_src1, p_src1); + XT_LASX2IP(wout2, align_src1, p_src1); + + XT_LASX2IP(j1, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j2, align_src2, (xtfloatx2 *)p_src2); + + wout1 = XT_ADD_SX2(wout1, j1); + wout2 = XT_ADD_SX2(wout2, j2); + + XT_SASX2IP(wout1, align_dst, p_dst); + XT_SASX2IP(wout2, align_dst, p_dst); + } + AE_SA64POS_FP(align_dst, p_dst); // finalize the stream + + //Remainder Loop + for(i=0; i < (N & 3); i++) + { + xtfloat j1; + xtfloat wout1; + XT_LSXP(wout1, (xtfloat *)p_src1, sizeof(xtfloat)); + j1 = (xtfloat) *(p_src2 + i); + wout1 = XT_ADD_S(wout1, j1); + XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat)); + } +} + +static void vecmean32_inpx3(const xtfloatx2* p_src1, const xtfloatx2* p_wsrc2, const xtfloatx2* p_wsrc3, xtfloatx2 *p_dst, int N){ + ae_valign align_src1, align_src2, align_src3, align_dst; + align_src1 = AE_LA64_PP(p_src1); + align_src2 = AE_LA64_PP(p_wsrc2); + align_src3 = AE_LA64_PP(p_wsrc3); + align_dst = AE_ZALIGN64(); + + int i = 0; + 
for(i=0; i < (N >> 2); i++) + { + xtfloatx2 j1, j2, j3, j4; + xtfloatx2 wj1, wj2; + xtfloatx2 wout1, wout2; + XT_LASX2IP(wout1, align_src1, p_src1); + XT_LASX2IP(wout2, align_src1, p_src1); + XT_LASX2IP(j1, align_src2, p_wsrc2); + XT_LASX2IP(j2, align_src3, p_wsrc3); + XT_LASX2IP(j3, align_src2, p_wsrc2); + XT_LASX2IP(j4, align_src3, p_wsrc3); + + wj1 = XT_ADD_SX2(j1, j2); + wj2 = XT_ADD_SX2(j3, j4); + wout1 = XT_ADD_SX2(wout1, wj1); + wout2 = XT_ADD_SX2(wout2, wj2); + XT_SASX2IP(wout1, align_dst, p_dst); + XT_SASX2IP(wout2, align_dst, p_dst); + } + AE_SA64POS_FP(align_dst, p_dst); // finalize the stream + + //Remainder Loop + for(i=0; i < (N & 3); i++) + { + xtfloat j1, j2; + xtfloat wj1; + xtfloat wout1; + XT_LSXP(wout1, (xtfloat *)p_src1, 4); + XT_LSXP(j1, (xtfloat *)p_wsrc2, 4); + XT_LSXP(j2, (xtfloat *)p_wsrc3, 4); + wj1 = XT_ADD_S(j1, j2); + wout1 = XT_ADD_S(wout1, wj1); + XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat)); + } +} + +static void vecmean32_inpx2(const xtfloatx2* p_src1, const xtfloatx2* p_wsrc2, xtfloatx2 *p_dst, int N){ + ae_valign align_src1, align_src2, align_dst; + align_src1 = AE_LA64_PP(p_src1); + align_src2 = AE_LA64_PP(p_wsrc2); + align_dst = AE_ZALIGN64(); + + int i = 0; + for(i=0; i < (N >> 2); i++) + { + xtfloatx2 j1, j2; + xtfloatx2 wout1, wout2; + XT_LASX2IP(wout1, align_src1, p_src1); + XT_LASX2IP(wout2, align_src1, p_src1); + XT_LASX2IP(j1, align_src2, p_wsrc2); + XT_LASX2IP(j2, align_src2, p_wsrc2); + wout1 = XT_ADD_SX2(wout1, j1); + wout2 = XT_ADD_SX2(wout2, j2); + XT_SASX2IP(wout1, align_dst, p_dst); + XT_SASX2IP(wout2, align_dst, p_dst); + } + AE_SA64POS_FP(align_dst, p_dst); // finalize the stream + + //Remainder Loop + for(i=0; i < (N & 3); i++) + { + xtfloat j1; + xtfloat wout1; + XT_LSXP(wout1, (xtfloat *)p_src1, 4); + XT_LSXP(j1, (xtfloat *)p_wsrc2, 4); + wout1 = XT_ADD_S(wout1, j1); + XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(WORD32)); + } +} + +static inline void xa_nn_reduce_sum_4D_f32_f32(const FLOAT32 * __restrict__ p_inp + ,const WORD32 *const p_4D_inp_shape + ,const WORD32 * __restrict__ p_axis_data + ,WORD32 num_inp_dims + ,WORD32 num_axis_dims + ,pVOID p_scratch_in) +{ + xtfloat *p_in = (xtfloat *)(p_inp); + xtfloat *p_scratch = (xtfloat *)(p_scratch_in); + + int temp_inp_n = p_4D_inp_shape[0]; + int temp_inp_h = p_4D_inp_shape[1]; + int temp_inp_w = p_4D_inp_shape[2]; + int temp_inp_c = p_4D_inp_shape[3]; + + int itr_axis = 0, itr_n = 0, itr_h = 0, itr_w = 0, itr_c = 0; + xtfloat *p_src2, *p_src3; + xtfloatx2 *p_src1; + xtfloatx2 * p_dst; + ae_valign align_src2; + + int axis_dims_count = num_axis_dims; + if(axis_dims_count) + { + switch(p_axis_data[itr_axis]) + { + case 0: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + for(itr_n=0; itr_n < (temp_inp_n & ~(2 - 1)); itr_n += 2) + { + p_src1 = (xtfloatx2 *)p_scratch; + p_src2 = p_in + itr_n * plane_size; + p_src3 = p_in + (itr_n + 1) * plane_size; + p_dst = (xtfloatx2 *)p_scratch; + vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, plane_size); + } + + if(temp_inp_n & 1) + { + p_src1 = (xtfloatx2 *)p_scratch; + p_src2 = (p_in + itr_n * plane_size); + p_dst = (xtfloatx2 *)p_scratch; + vecmean16_inpx2(p_src1, p_src2, p_dst, plane_size); + } + temp_inp_n = 1; + }break; + case 1: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + for(itr_h=0; itr_h < (temp_inp_h & ~(2 - 1)); itr_h += 2) + { + p_src2 = p_in + (itr_n * 
plane_size) + (itr_h * wc_plane_size); + p_src3 = p_in + (itr_n * plane_size) + ((itr_h + 1) * wc_plane_size); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, wc_plane_size); + p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + } + + if(temp_inp_h & 1) + { + p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + vecmean16_inpx2(p_src1, p_src2, p_dst, wc_plane_size); + } + } + temp_inp_h = 1; + }break; + case 2:{ + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + int hc_plane_size = temp_inp_h * temp_inp_c; + + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + for(itr_h=0; itr_h < (temp_inp_h); itr_h++) + { + p_src1 = (xtfloatx2 *)(p_scratch + (((itr_n * hc_plane_size) + itr_h * temp_inp_c))); + for(itr_w=0; itr_w < (temp_inp_w & ~(2 - 1)); itr_w += 2) + { + p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c); + p_src3 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + ((itr_w + 1) * temp_inp_c); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c); + vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, temp_inp_c); + p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + (itr_h * temp_inp_c)); + } + + if(temp_inp_w & 1) + { + p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c); + vecmean16_inpx2(p_src1, p_src2, p_dst, temp_inp_c); + } + } + } + temp_inp_w = 1; + }break; + case 3: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + int hw_plane_size = temp_inp_h * temp_inp_w; + int rem_c = (temp_inp_c & 7); + + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + for(itr_h=0; itr_h < (temp_inp_h); itr_h++) + { + for(itr_w=0; itr_w < (temp_inp_w); itr_w++) + { + p_src1 = (xtfloatx2 *)(p_scratch + (((itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w))); + p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w); + align_src2 = AE_LA64_PP(p_src2); + + for(itr_c=0; itr_c < (temp_inp_c >> 3); itr_c++) + { + xtfloatx2 j11, j12, j21, j22, i1; + i1 = XT_LSX((xtfloat *)p_src1, 0); + XT_LASX2IP(j11, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j12, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j21, align_src2, (xtfloatx2 *)p_src2); + XT_LASX2IP(j22, align_src2, (xtfloatx2 *)p_src2); + + j11 = XT_ADD_SX2(j11, j12); + j21 = XT_ADD_SX2(j21, j22); + + xtfloatx2 t1 = XT_SEL32_HH_SX2(j11, j11); + xtfloatx2 t2 = XT_SEL32_HH_SX2(j21, j21); + + j11 = XT_ADD_SX2(j11, t1); + j21 = XT_ADD_SX2(j21, t2); + + j11 = XT_ADD_SX2(j11, j21); + i1 = XT_ADD_SX2(i1, j11); + + XT_SSX(i1, (xtfloat *)p_dst, 0); + + p_src1 = p_dst; + } + //Remainder Loop + for(itr_c=0; itr_c < rem_c ; itr_c++) + { + xtfloat j1; + xtfloat i1; + i1 = XT_LSX((xtfloat *)p_src1, 0); + j1 = *p_src2++; + + i1 = XT_ADD_S(i1, j1); + XT_SSX(i1, (xtfloat *)p_dst, 0); + } + } + } + } + temp_inp_c = 1; + }break; + default: + break; + } + + axis_dims_count--; + itr_axis++; + } + + while(axis_dims_count) + { + ae_valign align_src; + xtfloat *p_scr_in = p_scratch; + xtfloatx2 *p_wsrc2, *p_wsrc3; + switch(p_axis_data[itr_axis]) + { + case 0: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; 
+ for(itr_n=1; itr_n < ((temp_inp_n -1) & ~(2 - 1)); itr_n += 2) + { + p_src1 = (xtfloatx2 *)p_scratch; + p_wsrc2 = (xtfloatx2 *)(p_scr_in + itr_n * plane_size); + p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n + 1) * plane_size); + p_dst = (xtfloatx2 *)p_scratch; + vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, plane_size); + } + + if((temp_inp_n - 1) & 1) + { + p_src1 = (xtfloatx2 *)p_scratch; + p_wsrc2 = (xtfloatx2 *)(p_scr_in + itr_n * plane_size); + p_dst = (xtfloatx2 *)p_scratch; + vecmean32_inpx2(p_src1, p_wsrc2, p_dst, plane_size); + } + temp_inp_n = 1; + }break; + case 1: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + p_src1 = (xtfloatx2 *)(p_scratch + + (itr_n * plane_size)); + for(itr_h = 1; itr_h < ((temp_inp_h - 1) & ~(2 - 1)); itr_h += 2) + { + p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size)); + p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + ((itr_h + 1) * wc_plane_size)); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, wc_plane_size); + p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + } + + if((temp_inp_h - 1) & 1) + { + p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size)); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size)); + vecmean32_inpx2(p_src1, p_wsrc2, p_dst, plane_size); + } + } + temp_inp_h = 1; + }break; + case 2:{ + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + int hc_plane_size = temp_inp_h * temp_inp_c; + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + for(itr_h=0; itr_h < (temp_inp_h); itr_h++) + { + p_src1 = (xtfloatx2 *)(p_scratch + ((itr_n * plane_size) + (itr_h * wc_plane_size))); + for(itr_w = 1; itr_w < ((temp_inp_w - 1) & ~(2 - 1)); itr_w += 2) + { + p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c)); + p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + ((itr_w + 1) * temp_inp_c)); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c); + vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, temp_inp_c); + p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + (itr_h * temp_inp_c)); + } + + if((temp_inp_w - 1) & 1) + { + p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c)); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c); + vecmean32_inpx2(p_src1, p_wsrc2, p_dst, temp_inp_c); + } + } + } + temp_inp_w = 1; + }break; + case 3: { + int plane_size = temp_inp_h * temp_inp_w * temp_inp_c; + int wc_plane_size = temp_inp_w * temp_inp_c; + int hw_plane_size = temp_inp_h * temp_inp_w; + int rem_c = ((temp_inp_c) & 3); + for(itr_n=0; itr_n < (temp_inp_n); itr_n++) + { + for(itr_h=0; itr_h < (temp_inp_h); itr_h++) + { + for(itr_w=0; itr_w < (temp_inp_w); itr_w++) + { + p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c)); + p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w); + align_src = AE_LA64_PP(p_wsrc2); + xtfloatx2 i1 = AE_MOVXTFLOATX2_FROMF32X2(AE_MOVDA32(0)); + for(itr_c = 0; itr_c < (temp_inp_c >> 2); itr_c++) + { + xtfloatx2 j1, j2; + XT_LASX2IP(j1, align_src, p_wsrc2); + XT_LASX2IP(j2, align_src, p_wsrc2); + + xtfloatx2 t1 = 
XT_SEL32_HH_SX2(j1, j1); + xtfloatx2 t2 = XT_SEL32_HH_SX2(j2, j2); + + j1 = XT_ADD_SX2(t1, j1); + j2 = XT_ADD_SX2(t2, j2); + + i1 = XT_ADD_SX2(i1, j1); + i1 = XT_ADD_SX2(i1, j2); + } + + //Remainder Loop + for(itr_c=0; itr_c < rem_c; itr_c++) + { + xtfloat j1; + XT_LSXP(j1, (xtfloat *)p_wsrc2, sizeof(xtfloat)); + i1 = XT_ADD_S(i1, j1); + } + XT_SSX(i1, (xtfloat *)p_dst, 0); + } + } + } + temp_inp_c = 1; + }break; + default: + break; + } + axis_dims_count--; + itr_axis++; + } +} + +WORD32 xa_nn_reduce_mean_4D_f32_f32( + FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp, + const WORD32 *const p_inp_shape, + const WORD32 * __restrict__ p_axis, + WORD32 num_out_dims, + WORD32 num_inp_dims, + WORD32 num_axis_dims, + void * __restrict__ p_scratch_in) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp, -1); + XA_NNLIB_ARG_CHK_PTR(p_axis, -1); + XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp_shape, -1); + + /* Invalid input checks */ + XA_NNLIB_ARG_CHK_COND(((num_inp_dims <= 0) || (num_inp_dims > 4)), -1); + XA_NNLIB_ARG_CHK_COND(((num_out_dims <= 0) || (num_out_dims > 4)), -1); + XA_NNLIB_ARG_CHK_COND(((num_axis_dims < 0) || (num_axis_dims > 4)), -1); + + int axis_itr = 0, inp_itr = 0, out_itr = 0; + int num_elm_in_axis = 1; + int current, past = -1; + for(axis_itr=0; axis_itr < num_axis_dims; axis_itr++) + { + current = p_axis[axis_itr]; + XA_NNLIB_ARG_CHK_COND(((current < 0) || (current > (num_inp_dims - 1))), -1); + XA_NNLIB_ARG_CHK_COND((p_inp_shape[current] > 1024), -1); + + /* Avoid calculation in case of repeated axis dims*/ + if(current != past) + { + num_elm_in_axis *= p_inp_shape[current]; + past = current; + } + } + + for(inp_itr=0; inp_itr < num_inp_dims; inp_itr++) + { + XA_NNLIB_ARG_CHK_COND((p_inp_shape[inp_itr] <= 0), -1); + } + + int out_length = 1; + for(out_itr=0; out_itr < num_out_dims; out_itr++) + { + XA_NNLIB_ARG_CHK_COND((p_out_shape[out_itr] <= 0), -1); + out_length *= p_out_shape[out_itr]; + } + + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_axis, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp_shape, sizeof(WORD32), -1); + + FLOAT32 *p_in = (FLOAT32 *)(p_inp); + WORD32 *p_scratch = (WORD32 *)(ALIGN_PTR(p_scratch_in, ALIGNMENT_8)); + + // Changing order of axis data so that reduce max will be first computed + // across largest inp shape dim in axis. This is required to + // minimize the scratch usage. + int inp_length = 1, p_axis_data[4] = {0}, inp_shape_max; + if(num_axis_dims) + { + inp_shape_max = p_inp_shape[p_axis[0]]; + axis_itr = 1; + int max_axis_itr = 0; + int temp_p_axis_0 = p_axis[0]; + for(axis_itr = 0; axis_itr < num_axis_dims; axis_itr++) + { + p_axis_data[axis_itr] = p_axis[axis_itr]; + } + for(axis_itr = 1; axis_itr < num_axis_dims; axis_itr++) + { + if(p_inp_shape[p_axis[axis_itr]] > inp_shape_max) + { + inp_shape_max = p_inp_shape[p_axis[axis_itr]]; + max_axis_itr = axis_itr; + } + } + p_axis_data[0] = p_axis_data[max_axis_itr]; + p_axis_data[max_axis_itr] = temp_p_axis_0; + + inp_itr = 0; + for(inp_itr=0; inp_itr < num_inp_dims; inp_itr++) + { + inp_length *= p_inp_shape[inp_itr]; + } + + memset(p_scratch, 0, ((inp_length / inp_shape_max) * sizeof(WORD32))); //TODO: Alternate approach for memset? + } + + // Promoting lesser dim tensors to 4D tensors. 
Also modifying axis + // data accordingly. + int p_4D_inp_shape[4] = {1, 1, 1, 1}; + int itr = num_inp_dims - 1; + int count = 3; + while(itr >= 0) + { + p_4D_inp_shape[count] = p_inp_shape[itr]; + itr--; + count--; + } + for(itr = 0; itr < num_axis_dims; itr++) + { + p_axis_data[itr] = p_axis_data[itr] + (4 - num_inp_dims); + } + ae_valign align_out = AE_ZALIGN64(); + + if(num_axis_dims) + { + if(num_elm_in_axis > 1) + { + xa_nn_reduce_sum_4D_f32_f32(p_in, + p_4D_inp_shape, + p_axis_data, + num_inp_dims, + num_axis_dims, + p_scratch); + itr = 0; + xtfloatx2 *p_src1 = (xtfloatx2 *)(p_scratch); + + float div = 1; + + for(int i = 0; i < num_axis_dims; i++) + { + div = div * (float)p_4D_inp_shape[p_axis_data[i]]; + } + + float mul = 1 / div; + + xtfloatx2 multiplier = XT_LSX((xtfloat *)&mul, 0); + + for(itr = 0; itr < (out_length >> 3); itr++) + { + xtfloatx2 temp1, temp2, temp3, temp4; + + temp2 = XT_LSX2X(p_src1, 8); + temp3 = XT_LSX2X(p_src1, 16); + temp4 = XT_LSX2X(p_src1, 24); + XT_LSX2XP(temp1, p_src1, 32); + + temp1 = XT_MUL_SX2(temp1, multiplier); + temp2 = XT_MUL_SX2(temp2, multiplier); + temp3 = XT_MUL_SX2(temp3, multiplier); + temp4 = XT_MUL_SX2(temp4, multiplier); + + XT_SASX2IP(temp1, align_out, (xtfloatx2 *)p_out); + XT_SASX2IP(temp2, align_out, (xtfloatx2 *)p_out); + XT_SASX2IP(temp3, align_out, (xtfloatx2 *)p_out); + XT_SASX2IP(temp4, align_out, (xtfloatx2 *)p_out); + } + AE_SA64POS_FP(align_out, p_out); + + for(itr = 0; itr < (out_length & 7); itr++) + { + xtfloat temp1; + XT_LSXP(temp1, (xtfloat *)p_src1, 4); + temp1 = XT_MUL_S(temp1, multiplier); + XT_SSXP(temp1, (xtfloat *)p_out, 4); + } + } + else + { + + memcpy(p_out, p_inp, inp_length * sizeof(FLOAT32)); + } + } + else + { + memcpy(p_out, p_inp, inp_length * sizeof(FLOAT32)); + } + + return 0; +}
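Taken together with op_mean.cpp earlier in the patch, the reduction kernel above is driven by sizing a scratch buffer with xa_nn_reduce_getsize_nhwc and then calling xa_nn_reduce_mean_4D_f32_f32. A condensed sketch follows, assuming the prototypes used by op_mean.cpp are in scope; the example function, the shape values, and the explicit free() of the scratch buffer are illustrative additions, and the -3 precision argument is copied verbatim from op_mean.cpp:

    // Illustrative sketch (not part of the patch): mean over axes {1, 2} of a
    // 1x4x8x16 float tensor, mirroring the call sequence in op_mean.cpp.
    #include <stdlib.h>
    #include <executorch/backends/cadence/hifi/kernels/kernels.h>

    static void reduce_mean_example(const FLOAT32* inp, FLOAT32* out) {
      WORD32 inp_shape[4] = {1, 4, 8, 16};
      WORD32 out_shape[4] = {1, 1, 1, 16}; // keepdim-style output shape
      WORD32 axis[2] = {1, 2};

      // Scratch sizing helper from NNLIB, used the same way in op_mean.cpp.
      int scratch_size = xa_nn_reduce_getsize_nhwc(
          -3, inp_shape, /*num_inp_dims=*/4, axis, /*num_axis_dims=*/2, 1);
      void* scratch = malloc(scratch_size);

      xa_nn_reduce_mean_4D_f32_f32(
          out,
          out_shape,
          inp,
          inp_shape,
          axis,
          /*num_out_dims=*/4,
          /*num_inp_dims=*/4,
          /*num_axis_dims=*/2,
          scratch);

      free(scratch); // added here for clarity
    }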