Adding where operator #14

Merged (2 commits) on Oct 10, 2024

14 changes: 7 additions & 7 deletions backends/cadence/aot/functions_hifi.yaml
@@ -62,6 +62,11 @@
     - arg_meta: null
       kernel_name: torch::executor::full_out
 
+- op: mean.out
+  kernels:
+    - arg_meta: null
+      kernel_name: impl::HiFi::mean_dim_out
+
 - op: mul.out
   kernels:
     - arg_meta: null
@@ -105,13 +110,8 @@
 - op: where.self_out
   kernels:
     - arg_meta: null
-      kernel_name: torch::executor::where_out
-
-- op: mean.out
-  kernels:
-    - arg_meta: null
-      kernel_name: impl::HiFi::mean_dim_out
-
+      kernel_name: impl::HiFi::where_out
+
 # custom ops
 - func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
   variants: function
1 change: 1 addition & 0 deletions backends/cadence/hifi/kernels/CMakeLists.txt
@@ -13,6 +13,7 @@ add_library(
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
+  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
 )

15 changes: 15 additions & 0 deletions backends/cadence/hifi/kernels/kernels.h
@@ -62,6 +62,21 @@ extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32(FLOAT32 * __restrict__ p_out,
                                                WORD32 num_axis_dims,
                                                void * __restrict__ p_scratch_in);
 
+extern "C" WORD32 xa_nn_elm_where_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                                              const FLOAT32 * __restrict__ p_inp1,
+                                              const FLOAT32 * __restrict__ p_inp2,
+                                              const unsigned char *__restrict__ p_condition,
+                                              WORD32 num_elm);
+
+extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
+                                                           const WORD32 *const p_out_shape,
+                                                           const FLOAT32 * __restrict__ p_inp1,
+                                                           const WORD32 *const p_inp1_shape,
+                                                           const FLOAT32 * __restrict__ p_inp2,
+                                                           const WORD32 *const p_inp2_shape,
+                                                           const unsigned char *__restrict__ p_condition,
+                                                           const WORD32 *const p_condition_shape);
+
 namespace impl {
 namespace HiFi {
 namespace kernels {
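As an aside (not part of the diff): a minimal sketch of how the flat, non-broadcast kernel declared above could be exercised on its own. The demo function name is made up here, the selection semantics are assumed to mirror the val_c ? a : b logic used by the operator's fallback path, and the zero-on-success return convention is assumed from the surrounding NNLib kernels rather than stated in this PR.

#include <executorch/backends/cadence/hifi/kernels/kernels.h>

// Picks a[i] where cond[i] is nonzero, b[i] otherwise.
void where_kernel_demo() {
  const FLOAT32 a[4] = {1.f, 2.f, 3.f, 4.f};
  const FLOAT32 b[4] = {10.f, 20.f, 30.f, 40.f};
  const unsigned char cond[4] = {1, 0, 1, 0};
  FLOAT32 out[4];

  // num_elm is the flat element count; every buffer must hold that many entries.
  WORD32 status = xa_nn_elm_where_f32xf32_f32(out, a, b, cond, /*num_elm=*/4);
  (void)status;  // expected: out == {1.f, 20.f, 3.f, 40.f}
}
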
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/CMakeLists.txt
@@ -22,11 +22,12 @@ endif()
 set(_aten_ops__srcs
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp"
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp"
-  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp"
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_where.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
@@ -38,7 +39,6 @@ set(_aten_ops__srcs
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp"
-  "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp"

155 changes: 155 additions & 0 deletions backends/cadence/hifi/operators/op_where.cpp
@@ -0,0 +1,155 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/backends/cadence/hifi/kernels/kernels.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::Error;
using executorch::aten::RuntimeContext;

namespace impl {
namespace HiFi {
namespace native {

Tensor& where_out(
    RuntimeContext& ctx,
    const Tensor& cond,
    const Tensor& a,
    const Tensor& b,
    Tensor& out) {
  ScalarType cond_type = cond.scalar_type();
  ScalarType a_type = a.scalar_type();
  ScalarType b_type = b.scalar_type();
  ScalarType common_type = promoteTypes(a_type, b_type);
  ScalarType out_type = out.scalar_type();

  ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);

  // Determine output size and resize for dynamic shapes
  ET_KERNEL_CHECK(
      ctx,
      resize_to_broadcast_target_size(a, b, cond, out) == Error::Ok,
      InvalidArgument,
      out);

  constexpr int kNnlibMaxDim = 4; /* fallback if broadcast and dim > 4 */
  constexpr auto name = "where.self_out";

  ET_CHECK_MSG(
      cond_type == ScalarType::Bool || cond_type == ScalarType::Byte,
      "Unhandled dtype %s for where.self_out",
      torch::executor::toString(cond_type));

  int a_dim = a.dim(), b_dim = b.dim(), con_dim = cond.dim(), out_dim = out.dim();
  bool optimized = 1;
  /* find broadcast */
  const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
  const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
  const bool cond_is_broadcasted = !out.sizes().equals(cond.sizes());
  const bool broadcast = (a_is_broadcasted || b_is_broadcasted || cond_is_broadcasted);

  int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
  max_dim = cond.dim() > max_dim ? cond.dim() : max_dim;
  max_dim = out.dim() > max_dim ? out.dim() : max_dim;

  if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float))
    optimized = 0;

  if ((a_dim == 0) || (b_dim == 0) || (con_dim == 0))
    optimized = 0;

  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
    optimized = 0;

  if (optimized) {
    const float* a_data = a.const_data_ptr<float>();
    const float* b_data = b.const_data_ptr<float>();
    float* out_data = out.mutable_data_ptr<float>();
    const unsigned char* con = cond.const_data_ptr<uint8_t>();

    if (broadcast == 1) {
      int out_shape[kNnlibMaxDim];
      int inp1_shape[kNnlibMaxDim];
      int inp2_shape[kNnlibMaxDim];
      int con_shape[kNnlibMaxDim];

      for (int i = 0; i < kNnlibMaxDim; i++) {
        con_shape[i] = 1;
        out_shape[i] = 1;
        inp1_shape[i] = 1;
        inp2_shape[i] = 1;
      }

      int off_o = kNnlibMaxDim - out.dim();
      int off_a = kNnlibMaxDim - a.dim();
      int off_b = kNnlibMaxDim - b.dim();
      int off_c = kNnlibMaxDim - cond.dim();

      for (int i = 0; i < out.dim(); i++)
        out_shape[i + off_o] = out.size(i);
      for (int i = 0; i < a.dim(); i++)
        inp1_shape[i + off_a] = a.size(i);
      for (int i = 0; i < b.dim(); i++)
        inp2_shape[i + off_b] = b.size(i);
      for (int i = 0; i < cond.dim(); i++)
        con_shape[i + off_c] = cond.size(i);

      if (con_shape[0] != out_shape[0] || con_shape[1] != out_shape[1] ||
          con_shape[2] != out_shape[2] || con_shape[3] != out_shape[3]) {
        void* p_scratch =
            malloc(out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]);
        const unsigned char* p_brd_cond = (const unsigned char*)p_scratch;
        xa_nn_broadcast_8_8(
            (WORD8* __restrict__)p_brd_cond,
            out_shape,
            (const WORD8* __restrict__)con,
            con_shape,
            4);

        for (int i = 0; i < 4; i++) {
          con_shape[i] = out_shape[i];
        }
        xa_nn_elm_where_broadcast_4D_f32xf32_f32(
            out_data, out_shape, a_data, inp1_shape,
            b_data, inp2_shape, p_brd_cond, con_shape);
        free(p_scratch);
      } else {
        xa_nn_elm_where_broadcast_4D_f32xf32_f32(
            out_data, out_shape, a_data, inp1_shape,
            b_data, inp2_shape, con, con_shape);
      }
    } else {
      xa_nn_elm_where_f32xf32_f32(out_data, a_data, b_data, con, out.numel());
    }
    return out;
  }

  ET_SWITCH_REALHB_TYPES(a_type, ctx, name, CTYPE_A, [&]() {
    ET_SWITCH_REALHB_TYPES(b_type, ctx, name, CTYPE_B, [&]() {
      using CTYPE_OUT = typename torch::executor::promote_types<CTYPE_A, CTYPE_B>::type;
      torch::executor::apply_ternary_elementwise_fn<CTYPE_A, CTYPE_B, uint8_t, CTYPE_OUT>(
          [](const CTYPE_A val_a, const CTYPE_B val_b, const uint8_t val_c) {
            CTYPE_OUT a_casted = static_cast<CTYPE_OUT>(val_a);
            CTYPE_OUT b_casted = static_cast<CTYPE_OUT>(val_b);
            return val_c ? a_casted : b_casted;
          },
          a,
          b,
          cond,
          out);
    });
  });
  return out;
}

} // namespace native
} // namespace HiFi
} // namespace impl
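
For readability, a standalone sketch of the shape handling used by the broadcast path above: each tensor's sizes are right-aligned into a kNnlibMaxDim-element array, with the unused leading entries filled with 1, before the shapes are handed to the 4-D NNLib kernel. The helper name pad_shape_to_4d is hypothetical and not part of the PR.

#include <array>

constexpr int kNnlibMaxDim = 4;

// Right-align `dim` sizes into a 4-entry shape, padding leading dims with 1,
// as done for out_shape / inp1_shape / inp2_shape / con_shape in where_out.
std::array<int, kNnlibMaxDim> pad_shape_to_4d(const int* sizes, int dim) {
  std::array<int, kNnlibMaxDim> shape;
  shape.fill(1);
  const int offset = kNnlibMaxDim - dim;
  for (int i = 0; i < dim; ++i) {
    shape[offset + i] = sizes[i];
  }
  return shape;
}

// Example: a rank-2 tensor with sizes {8, 3} maps to {1, 1, 8, 3}.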
