Adding atan2, softmax, clamp and remainder ops
nishpoonia committed Nov 22, 2024
1 parent d730ed8 · commit 41d64d2
Showing 11 changed files with 3,352 additions and 2 deletions.
17 changes: 16 additions & 1 deletion backends/cadence/aot/functions_hifi.yaml
@@ -20,7 +20,12 @@
- op: _softmax.out
  kernels:
    - arg_meta: null
-      kernel_name: torch::executor::softmax_out
+      kernel_name: cadence::impl::HiFi::softmax_out

- op: atan2.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::HiFi::atan2_out

- op: add.out
  kernels:
@@ -37,6 +42,11 @@
    - arg_meta: null
      kernel_name: cadence::impl::HiFi::cat_out

- op: clamp.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::HiFi::clamp_tensor_out

- op: clone.out
  kernels:
    - arg_meta: null
@@ -102,6 +112,11 @@
    - arg_meta: null
      kernel_name: impl::HiFi::pow_Tensor_Tensor_out

- op: remainder.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::HiFi::remainder_Tensor_out

- op: rsqrt.out
  kernels:
    - arg_meta: null
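For reference, each kernel_name in this file must resolve to an out-variant kernel following the ExecuTorch calling convention: the runtime passes a kernel context plus the operator's inputs, and the kernel writes into (and returns) out. A minimal sketch of the shape such a function takes, based on the atan2 kernel defined later in this commit:

#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

// Out-variant kernel shape bound by an entry like `atan2.out` above.
// (Sketch only; the full definition appears in op_atan2.cpp below.)
Tensor& atan2_out(
    KernelRuntimeContext& ctx,
    const Tensor& a,
    const Tensor& b,
    Tensor& out);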
3 changes: 3 additions & 0 deletions backends/cadence/hifi/kernels/CMakeLists.txt
@@ -12,11 +12,14 @@ add_library(
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_atan2_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_clamp_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_pow_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_transpose_32.c
34 changes: 34 additions & 0 deletions backends/cadence/hifi/kernels/kernels.h
@@ -41,6 +41,26 @@ extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(
    const FLOAT32* __restrict__ p_inp2,
    const WORD32* const p_inp2_shape);

extern "C" void
xa_nn_elm_atan2_f32(FLOAT32* z, const FLOAT32* y, const FLOAT32* x, WORD32 N);

extern "C" WORD32 xa_nn_elm_clamp_f32xf32xf32_f32(
FLOAT32* __restrict__ p_out,
const FLOAT32* __restrict__ p_inp,
const FLOAT32* __restrict__ p_min,
const FLOAT32* __restrict__ p_max,
WORD32 num_elm);

extern "C" WORD32 xa_nn_elm_clamp_broadcast_4D_f32Xf32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
const FLOAT32* __restrict__ p_inp,
const WORD32* const p_inp_shape,
const FLOAT32* __restrict__ p_min,
const WORD32* const p_min_shape,
const FLOAT32* __restrict__ p_max,
const WORD32* const p_max_shape);

extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
@@ -107,6 +127,20 @@ extern "C" void xa_nn_elm_pow_f32(
    const FLOAT32* restrict y,
    WORD32 N);

extern "C" WORD32 xa_nn_elm_remainder_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const FLOAT32* __restrict__ p_inp1,
const FLOAT32* __restrict__ p_inp2,
WORD32 num_elm);

extern "C" WORD32 xa_nn_elm_remainder_broadcast_4D_f32xf32_f32(
FLOAT32* __restrict__ p_out,
const WORD32* const p_out_shape,
const FLOAT32* __restrict__ p_inp1,
const WORD32* const p_inp1_shape,
const FLOAT32* __restrict__ p_inp2,
const WORD32* const p_inp2_shape);

extern "C" WORD32 xa_nn_elm_where_f32xf32_f32(
    FLOAT32* __restrict__ p_out,
    const FLOAT32* __restrict__ p_inp1,
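As a usage reference for these entry points, here is a minimal host-side sketch that calls the flat (non-broadcast) remainder kernel on raw float buffers. It assumes kernels.h pulls in the NNLib scalar typedefs (FLOAT32, WORD32), as its declarations suggest, and that a zero return value means success, mirroring the ret_val == 0 checks used for the broadcast helper in op_atan2.cpp below; both are assumptions, not documented guarantees.

#include <cstdio>

#include <executorch/backends/cadence/hifi/kernels/kernels.h>

int main() {
  // out[i] = remainder(inp1[i], inp2[i]) over a flat 4-element buffer.
  const FLOAT32 inp1[4] = {5.0f, -5.0f, 7.5f, 2.0f};
  const FLOAT32 inp2[4] = {3.0f, 3.0f, 2.0f, 2.0f};
  FLOAT32 out[4];

  // Assumption: a zero return value indicates success.
  WORD32 ret_val = xa_nn_elm_remainder_f32xf32_f32(out, inp1, inp2, 4);
  if (ret_val != 0) {
    std::printf("xa_nn_elm_remainder_f32xf32_f32 failed: %d\n", ret_val);
    return 1;
  }
  for (int i = 0; i < 4; i++) {
    std::printf("out[%d] = %f\n", i, out[i]);
  }
  return 0;
}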
5 changes: 4 additions & 1 deletion backends/cadence/hifi/operators/CMakeLists.txt
@@ -21,7 +21,9 @@ endif()
# ATen compliant ops that are needed to run this model.
set(_aten_ops__srcs
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_atan2.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_cat.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_clamp.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_full.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_maximum.cpp"
@@ -30,7 +32,9 @@ set(_aten_ops__srcs
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_permute_copy.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_pow.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_remainder.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_rsqrt.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_softmax.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp"
    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp"
@@ -39,7 +43,6 @@ set(_aten_ops__srcs
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp"
-    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp"
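The source swap above pairs with the yaml change at the top of this commit: the portable softmax drops out of the build and op_softmax.cpp under the HiFi backend now serves _softmax.out. That file is not shown in this excerpt, but following the ATen schema _softmax(Tensor self, int dim, bool half_to_float), its out variant would be expected to look roughly like the hypothetical sketch below (not the actual declaration):

#include <cstdint>

#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

// Hypothetical out-variant declaration for the HiFi softmax kernel,
// inferred from the ATen _softmax schema; the real op_softmax.cpp is
// not part of this excerpt.
Tensor& softmax_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    int64_t dim,
    bool half_to_float,
    Tensor& out);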
195 changes: 195 additions & 0 deletions backends/cadence/hifi/operators/op_atan2.cpp
@@ -0,0 +1,195 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <cmath>
#include <cstdlib> // malloc/free for the temporary broadcast buffers below

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::runtime::KernelRuntimeContext;
using executorch::runtime::tensors_have_same_dim_order;
using torch::executor::Error;
using torch::executor::resize_to_broadcast_target_size;

namespace cadence {
namespace impl {
namespace HiFi {
namespace native {

Tensor& atan2_out(
    KernelRuntimeContext& ctx,
    const Tensor& a,
    const Tensor& b,
    Tensor& out) {
  // Determine output size and resize for dynamic shapes
  ET_KERNEL_CHECK(
      ctx,
      resize_to_broadcast_target_size(a, b, out) == Error::Ok,
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);

  ScalarType a_type = a.scalar_type();
  ScalarType b_type = b.scalar_type();
  ScalarType out_type = out.scalar_type();

  constexpr auto name = "atan2.out";
  constexpr int kNnlibMaxDim = 16;
  int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim();
  bool optimized = true;

  const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
  const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
  int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
  max_dim = out.dim() > max_dim ? out.dim() : max_dim;

  // The NNLib fast path only covers float tensors up to kNnlibMaxDim dims.
  if (out_type != ScalarType::Float)
    optimized = false;

  if (max_dim > kNnlibMaxDim)
    optimized = false;

  WORD32 num_elm = out.numel();

  if (optimized) {
    if (broadcast) {
      // Both inputs need broadcasting: materialize each one at the output
      // shape (as 32-bit copies) before the element-wise atan2 call.
      WORD32* __restrict__ ptr1 =
          (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32));
      WORD32* __restrict__ ptr2 =
          (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32));

      WORD32* __restrict__ pin1 =
          (WORD32* __restrict__)a.const_data_ptr<float>();
      WORD32* __restrict__ pin2 =
          (WORD32* __restrict__)b.const_data_ptr<float>();

      WORD32 p_out_shape[kNnlibMaxDim];
      WORD32 p_inp1_shape[kNnlibMaxDim];
      WORD32 p_inp2_shape[kNnlibMaxDim];

      for (int i = 0; i < out_dim; i++)
        p_out_shape[i] = out.size(i);
      for (int i = 0; i < a_dim; i++)
        p_inp1_shape[i] = a.size(i);
      for (int i = 0; i < b_dim; i++)
        p_inp2_shape[i] = b.size(i);

      WORD32 ret_val = xa_nn_broadcast_32_32(
          ptr1, p_out_shape, pin1, p_inp1_shape, out_dim);

      ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out);

      ret_val = xa_nn_broadcast_32_32(
          ptr2, p_out_shape, pin2, p_inp2_shape, out_dim);

      ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out);

      FLOAT32* __restrict__ p_out =
          (FLOAT32* __restrict__)out.mutable_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp1 = (const FLOAT32* __restrict__)ptr1;
      const FLOAT32* __restrict__ p_inp2 = (const FLOAT32* __restrict__)ptr2;

      xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm);

      free(ptr1);
      free(ptr2);
    } else if (a_is_broadcasted && (!b_is_broadcasted)) {
      // Only `a` needs broadcasting; `b` is already at the output shape.
      FLOAT32* __restrict__ ptr1 =
          (FLOAT32* __restrict__)malloc(num_elm * sizeof(FLOAT32));

      FLOAT32* __restrict__ pin1 =
          (FLOAT32* __restrict__)a.const_data_ptr<float>();

      WORD32 p_out_shape[kNnlibMaxDim];
      WORD32 p_inp1_shape[kNnlibMaxDim];

      for (int i = 0; i < out_dim; i++)
        p_out_shape[i] = out.size(i);
      for (int i = 0; i < a_dim; i++)
        p_inp1_shape[i] = a.size(i);

      WORD32 ret_val = xa_nn_broadcast_32_32(
          (WORD32*)ptr1, p_out_shape, (WORD32*)pin1, p_inp1_shape, out_dim);

      ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out);

      FLOAT32* __restrict__ p_out =
          (FLOAT32* __restrict__)out.mutable_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp1 = (const FLOAT32* __restrict__)ptr1;
      const FLOAT32* __restrict__ p_inp2 =
          (const FLOAT32* __restrict__)b.const_data_ptr<float>();

      xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm);

      free(ptr1);
    } else if (b_is_broadcasted && (!a_is_broadcasted)) {
      // Only `b` needs broadcasting; `a` is already at the output shape.
      WORD32* __restrict__ ptr1 =
          (WORD32* __restrict__)malloc(num_elm * sizeof(WORD32));

      WORD32* __restrict__ pin1 =
          (WORD32* __restrict__)b.const_data_ptr<float>();

      WORD32 p_out_shape[kNnlibMaxDim];
      WORD32 p_inp1_shape[kNnlibMaxDim];

      for (int i = 0; i < out_dim; i++)
        p_out_shape[i] = out.size(i);
      for (int i = 0; i < b_dim; i++)
        p_inp1_shape[i] = b.size(i);

      // Check the broadcast result here as well, matching the other branches.
      WORD32 ret_val =
          xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim);

      ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out);

      FLOAT32* __restrict__ p_out =
          (FLOAT32* __restrict__)out.mutable_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp1 =
          (const FLOAT32* __restrict__)a.const_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp2 = (const FLOAT32* __restrict__)ptr1;

      xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm);

      free(ptr1);
    } else {
      // Same shapes: call the vectorized kernel directly on the tensor data.
      FLOAT32* __restrict__ p_out =
          (FLOAT32* __restrict__)out.mutable_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp1 =
          (const FLOAT32* __restrict__)a.const_data_ptr<float>();
      const FLOAT32* __restrict__ p_inp2 =
          (const FLOAT32* __restrict__)b.const_data_ptr<float>();

      xa_nn_elm_atan2_f32(p_out, p_inp1, p_inp2, num_elm);
    }
    return out;
  }

  // Portable fallback for dtypes/ranks the NNLib path does not cover:
  // promote both inputs and apply std::atan2 element-wise with broadcasting.
  ET_SWITCH_REALHB_TYPES(a_type, ctx, name, CTYPE_A, [&]() {
    ET_SWITCH_REALHB_TYPES(b_type, ctx, name, CTYPE_B, [&]() {
      ET_SWITCH_FLOATH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() {
        torch::executor::
            apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
                [](const CTYPE_A val_a, const CTYPE_B val_b) {
                  CTYPE_OUT casted_a = static_cast<CTYPE_OUT>(val_a);
                  CTYPE_OUT casted_b = static_cast<CTYPE_OUT>(val_b);
                  return static_cast<CTYPE_OUT>(std::atan2(casted_a, casted_b));
                },
                a,
                b,
                out);
      });
    });
  });

  return out;
}

} // namespace native
} // namespace HiFi
} // namespace impl
} // namespace cadence
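For intuition about what both paths compute: on same-shaped float inputs, atan2_out reduces to an element-wise std::atan2 over the flattened data, with the first argument treated as y and the second as x. A standalone scalar reference sketch (illustrative only, not part of the commit):

#include <cmath>
#include <cstdio>

// Scalar reference for atan2_out on same-shaped inputs: the fast path and
// the portable fallback above both compute out[i] = atan2(a[i], b[i]).
void atan2_reference(const float* a, const float* b, float* out, int n) {
  for (int i = 0; i < n; i++) {
    out[i] = std::atan2(a[i], b[i]); // argument order: atan2(y, x)
  }
}

int main() {
  const float y[3] = {0.0f, 1.0f, -1.0f};
  const float x[3] = {1.0f, 0.0f, -1.0f};
  float z[3];
  atan2_reference(y, x, z, 3);
  for (int i = 0; i < 3; i++) {
    std::printf("atan2(%g, %g) = %g\n", y[i], x[i], z[i]);
  }
  return 0; // expected: 0, pi/2 (~1.5708), -3*pi/4 (~-2.35619)
}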