adding pow, remainder, minimum, maximum operators (#33)
* adding pow, remainder, minimum, maximum operators

nishpoonia authored Nov 7, 2024
1 parent 9b71aed commit 07743ab
Showing 12 changed files with 3,451 additions and 1 deletion.
32 changes: 31 additions & 1 deletion backends/cadence/aot/functions_hifi.yaml
@@ -62,11 +62,21 @@
    - arg_meta: null
      kernel_name: torch::executor::full_out

- op: maximum.out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::maximum_out

- op: mean.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::HiFi::mean_dim_out

- op: minimum.out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::minimum_out

- op: mul.out
  kernels:
    - arg_meta: null
@@ -77,6 +87,26 @@
    - arg_meta: null
      kernel_name: torch::executor::permute_copy_out

- op: pow.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::pow_Scalar_out

- op: pow.Tensor_Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::pow_Tensor_Scalar_out

- op: pow.Tensor_Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::pow_Tensor_Tensor_out

- op: rsqrt.out
  kernels:
    - arg_meta: null
      kernel_name: impl::HiFi::rsqrt_out

- op: sigmoid.out
  kernels:
    - arg_meta: null
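For orientation: each kernel_name above is expected to resolve to an out-variant kernel with the standard ExecuTorch signature. A minimal sketch of the binding for maximum.out, matching the maximum_out definition added in op_maximum.cpp later in this diff:

// Sketch: the out-variant signature that the maximum.out entry above binds to
// (see op_maximum.cpp below for the full definition).
Tensor& maximum_out(
    RuntimeContext& ctx, // kernel runtime context
    const Tensor& a,     // first input
    const Tensor& b,     // second input
    Tensor& out);        // pre-allocated output, resized to the broadcast shape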
3 changes: 3 additions & 0 deletions backends/cadence/hifi/kernels/CMakeLists.txt
@@ -9,10 +9,13 @@ add_library(
  cadence_kernels
  kernels.cpp
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_pow_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
)
42 changes: 42 additions & 0 deletions backends/cadence/hifi/kernels/kernels.h
@@ -15,6 +15,14 @@
#include "xa_nnlib_kernels_api.h"

/* Potential NNLIB function/APIs */

extern "C" WORD32 xa_nn_broadcast_32_32(
WORD32* __restrict__ p_out,
const int* const out_shape,
WORD32* __restrict__ p_in,
const int* const in_shape,
int num_dims);
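A hypothetical usage sketch of xa_nn_broadcast_32_32 as declared above; the buffer sizes, shapes, and values are illustrative only:

/* Hypothetical example: broadcast a [1, 4] WORD32 buffer to [3, 4].
   Assumes the nnlib typedefs (WORD32 = 32-bit int) are in scope. */
static void broadcast_32_demo() {
  WORD32 in[4] = {1, 2, 3, 4};
  const int in_shape[2] = {1, 4};
  WORD32 out[12];
  const int out_shape[2] = {3, 4};
  // Repeats the single input row three times into out.
  xa_nn_broadcast_32_32(out, out_shape, in, in_shape, /*num_dims=*/2);
}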

extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
@@ -47,6 +55,34 @@ extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32(
    const WORD32* const p_inp2_shape,
    WORD32 mode);

extern "C" WORD32 xa_nn_elm_maximum_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const FLOAT32* __restrict__ p_inp1,
const FLOAT32* __restrict__ p_inp2,
WORD32 num_elm);

extern "C" WORD32 xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
const FLOAT32* __restrict__ p_inp1,
const WORD32* const p_inp1_shape,
const FLOAT32* __restrict__ p_inp2,
const WORD32* const p_inp2_shape);

extern "C" WORD32 xa_nn_elm_minimum_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const FLOAT32* __restrict__ p_inp1,
const FLOAT32* __restrict__ p_inp2,
WORD32 num_elm);

extern "C" WORD32 xa_nn_elm_minimum_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
const FLOAT32* __restrict__ p_inp1,
const WORD32* const p_inp1_shape,
const FLOAT32* __restrict__ p_inp2,
const WORD32* const p_inp2_shape);
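A hypothetical usage sketch of the flat (non-broadcast) maximum/minimum variants declared above; the buffers and element count are illustrative:

/* Hypothetical example: elementwise max/min over 8 contiguous floats.
   Assumes FLOAT32 (float) from the nnlib typedefs. */
static void minimum_maximum_demo() {
  FLOAT32 a[8] = {0}, b[8] = {0}, mx[8], mn[8];
  xa_nn_elm_maximum_f32xf32_f32(mx, a, b, /*num_elm=*/8); // mx[i] = max(a[i], b[i])
  xa_nn_elm_minimum_f32xf32_f32(mn, a, b, /*num_elm=*/8); // mn[i] = min(a[i], b[i])
}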

extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
Expand All @@ -55,6 +91,12 @@ extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(
const FLOAT32* __restrict__ p_inp2,
const WORD32* const p_inp2_shape);

extern "C" void xa_nn_elm_pow_f32(
FLOAT32* restrict z,
const FLOAT32* restrict x,
const FLOAT32* restrict y,
WORD32 N);
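A hypothetical usage sketch of xa_nn_elm_pow_f32 as declared above (note it returns void, unlike the WORD32-returning kernels):

/* Hypothetical example: z[i] = pow(x[i], y[i]) over N contiguous floats. */
static void pow_demo() {
  FLOAT32 x[4] = {2.f, 3.f, 4.f, 9.f};
  FLOAT32 y[4] = {2.f, 2.f, 0.5f, 0.5f};
  FLOAT32 z[4];
  xa_nn_elm_pow_f32(z, x, y, /*N=*/4); // z = {4, 9, 2, 3}
}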

extern "C" WORD32 xa_nn_elm_where_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const FLOAT32* __restrict__ p_inp1,
4 changes: 4 additions & 0 deletions backends/cadence/hifi/operators/CMakeLists.txt
@@ -22,8 +22,12 @@ endif()
set(_aten_ops__srcs
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_maximum.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_minimum.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_pow.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_rsqrt.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp"
172 changes: 172 additions & 0 deletions backends/cadence/hifi/operators/op_maximum.cpp
@@ -0,0 +1,172 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::runtime::can_cast;
using executorch::runtime::canCast;
using executorch::runtime::CppTypeToScalarType;
using executorch::runtime::promoteTypes;
using torch::executor::apply_binary_elementwise_fn;
using torch::executor::Error;
using torch::executor::resize_to_broadcast_target_size;

namespace impl {
namespace HiFi {
namespace native {
namespace {

template <
    bool can_cast,
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct MaximumInner;

template <
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct MaximumInner<true, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT> {
  static void run(const Tensor& a, const Tensor& b, Tensor& out) {
    apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
        // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue)
        [](const CTYPE_A val_a, const CTYPE_B val_b) {
          CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
          CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
          CTYPE_IN value =
              torch::executor::native::utils::max_override(a_casted, b_casted);

          return static_cast<CTYPE_OUT>(value);
        },
        a,
        b,
        out);
  }
};

struct ReportCanCastBug {
  static void run(const Tensor&, const Tensor&, Tensor&) {
    ET_DCHECK_MSG(false, "BUG: canCast should have been checked above");
  }
};

template <
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct MaximumInner<false, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT>
    : public ReportCanCastBug {};

} // namespace

Tensor& maximum_out(
    RuntimeContext& ctx,
    const Tensor& a,
    const Tensor& b,
    Tensor& out) {
  (void)ctx;

  ET_KERNEL_CHECK(
      ctx,
      resize_to_broadcast_target_size(a, b, out) == Error::Ok,
      InvalidArgument,
      out);

  constexpr int kNnlibMaxDim = 4; /* fallback if broadcast and dim > 4 */

  ScalarType a_type = a.scalar_type();
  ScalarType b_type = b.scalar_type();
  ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true);
  ScalarType out_type = out.scalar_type();

  ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out);

  bool optimized = true;
  /* find broadcast */
  bool a_is_broadcasted = !out.sizes().equals(a.sizes());
  bool b_is_broadcasted = !out.sizes().equals(b.sizes());
  bool broadcast = (a_is_broadcasted || b_is_broadcasted);

  int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
  max_dim = out.dim() > max_dim ? out.dim() : max_dim;

  if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float))
    optimized = false;
  if ((broadcast == true) && (max_dim > kNnlibMaxDim))
    optimized = false;

  if (optimized) {
    float* a_data = a.mutable_data_ptr<float>();
    float* b_data = b.mutable_data_ptr<float>();
    float* out_data = out.mutable_data_ptr<float>();

    if (broadcast == true) {
      int out_shape[kNnlibMaxDim];
      int inp1_shape[kNnlibMaxDim];
      int inp2_shape[kNnlibMaxDim];

      for (int i = 0; i < kNnlibMaxDim; i++) {
        out_shape[i] = 1;
        inp1_shape[i] = 1;
        inp2_shape[i] = 1;
      }

      int off_o = kNnlibMaxDim - out.dim();
      int off_a = kNnlibMaxDim - a.dim();
      int off_b = kNnlibMaxDim - b.dim();

      for (int i = 0; i < out.dim(); i++) {
        out_shape[i + off_o] = out.size(i);
      }

      for (int i = 0; i < a.dim(); i++)
        inp1_shape[i + off_a] = a.size(i);

      for (int i = 0; i < b.dim(); i++)
        inp2_shape[i + off_b] = b.size(i);

      xa_nn_elm_maximum_broadcast_4D_f32xf32_f32(
          out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape);
    } else {
      xa_nn_elm_maximum_f32xf32_f32(out_data, a_data, b_data, out.numel());
    }
    return out;
  }

  ET_SWITCH_REALHB_TYPES(a_type, ctx, "maximum.out", CTYPE_A, [&]() {
    ET_SWITCH_REALHB_TYPES(b_type, ctx, "maximum.out", CTYPE_B, [&]() {
      using CTYPE_IN = typename torch::executor::
          promote_types<CTYPE_A, CTYPE_B, /*half_to_float*/ true>::type;
      ET_DCHECK(CppTypeToScalarType<CTYPE_IN>::value == common_type);
      ET_SWITCH_REALHB_TYPES(out_type, ctx, "maximum.out", CTYPE_OUT, [&]() {
        MaximumInner<
            can_cast<CTYPE_IN, CTYPE_OUT>::value,
            CTYPE_A,
            CTYPE_B,
            CTYPE_IN,
            CTYPE_OUT>::run(a, b, out);
      });
    });
  });

  return out;
}

} // namespace native
} // namespace HiFi
} // namespace impl
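The broadcast fast path above right-aligns every shape into a fixed 4-slot array (kNnlibMaxDim), left-padding with 1s, before handing the shapes to the NNLIB 4-D kernel. A standalone sketch of just that padding step, with a hypothetical rank-2 shape:

#include <cstdio>

/* Standalone illustration of the shape right-alignment in maximum_out:
   a shape of rank `dim` is copied into a 4-slot array, left-padded with 1s. */
static void pad_shape(const int* sizes, int dim, int padded[4]) {
  for (int i = 0; i < 4; i++)
    padded[i] = 1;
  int off = 4 - dim; // same role as off_o / off_a / off_b above
  for (int i = 0; i < dim; i++)
    padded[i + off] = sizes[i];
}

int main() {
  int sizes[2] = {3, 5}; // a rank-2 tensor of shape [3, 5]
  int padded[4];
  pad_shape(sizes, /*dim=*/2, padded);
  printf("%d %d %d %d\n", padded[0], padded[1], padded[2], padded[3]); // 1 1 3 5
  return 0;
}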
