Adding add operator including broadcast #3

Merged
merged 3 commits on Jul 31, 2024
13 changes: 13 additions & 0 deletions backends/cadence/aot/functions_hifi.yaml
@@ -107,6 +107,11 @@
- arg_meta: null
kernel_name: torch::executor::rsqrt_out

- op: empty.out
kernels:
- arg_meta: null
kernel_name: torch::executor::empty_out

# custom ops
- func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
variants: function
@@ -120,6 +125,10 @@
- arg_meta: null
kernel_name: impl::HiFi::dequantize_per_tensor_out

- func: cadence::quantized_conv.out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, Tensor weight_zero_point, Tensor bias_scale, float out_scale, int out_zero_point, Tensor out_multiplier, Tensor out_shift, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: impl::HiFi::quantized_conv_out

- func: cadence::quantized_layer_norm.out(Tensor input, Tensor in_scale, Tensor in_zero_point, int[] normalized_shape, Tensor weight, Tensor bias, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
kernels:
@@ -131,3 +140,7 @@
- arg_meta: null
kernel_name: impl::HiFi::quantized_linear_out

- func: cadence::quantized_relu.out(Tensor X, Tensor X_zero_point, *, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: impl::HiFi::quantized_relu_out
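
Each entry above binds an operator schema to a kernel_name, which the ExecuTorch kernel registration resolves to a C++ out-variant function. As a rough sketch of the expected shape (the real implementation lives in quantized_relu_out.cpp; this mirrors the calling convention of add_out later in this PR and is not the actual code):

namespace impl {
namespace HiFi {

// Hypothetical signature sketch: runtime context first, schema arguments
// in order, out tensor last, returning a reference to out.
Tensor& quantized_relu_out(
    RuntimeContext& ctx,
    const Tensor& X,
    const Tensor& X_zero_point,
    Tensor& out);

} // namespace HiFi
} // namespace impl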
2 changes: 2 additions & 0 deletions backends/cadence/hifi/kernels/CMakeLists.txt
@@ -9,6 +9,8 @@ add_library(
cadence_kernels
kernels.cpp
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c
15 changes: 15 additions & 0 deletions backends/cadence/hifi/kernels/kernels.h
@@ -49,6 +49,21 @@ extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict_
const WORD32 *const p_condition_shape
);

/* new functions in nnlib */
extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
const WORD32 *const p_out_shape,
const FLOAT32 * __restrict__ p_inp1,
const WORD32 *const p_inp1_shape,
const FLOAT32 * __restrict__ p_inp2,
const WORD32 *const p_inp2_shape);

extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out,
const WORD32 *const p_out_shape,
const FLOAT32 * __restrict__ p_inp1,
const WORD32 *const p_inp1_shape,
const FLOAT32 * __restrict__ p_inp2,
const WORD32 *const p_inp2_shape);

namespace impl {
namespace HiFi {
namespace kernels {
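
These declarations mirror the nnlib sources added to the kernels CMakeLists.txt above. A minimal standalone sketch of how the broadcast add kernel is invoked (assuming nnlib's conventional typedefs FLOAT32 = float and WORD32 = int32_t, which are not shown in this diff); shapes are left-padded with 1s to rank 4, as op_add.cpp below does:

#include <cstdint>

typedef float FLOAT32;   // assumed nnlib typedef
typedef int32_t WORD32;  // assumed nnlib typedef

extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(
    FLOAT32* __restrict__ p_out, const WORD32* const p_out_shape,
    const FLOAT32* __restrict__ p_inp1, const WORD32* const p_inp1_shape,
    const FLOAT32* __restrict__ p_inp2, const WORD32* const p_inp2_shape);

int main() {
  // Broadcast a [3] vector across a [2, 3] matrix: both shapes are
  // left-padded with 1s to rank 4 before calling the kernel.
  FLOAT32 a[6] = {1, 2, 3, 4, 5, 6};   // logical shape [2, 3]
  FLOAT32 b[3] = {10, 20, 30};         // logical shape [3]
  FLOAT32 out[6];
  const WORD32 a_shape[4] = {1, 1, 2, 3};
  const WORD32 b_shape[4] = {1, 1, 1, 3};
  const WORD32 out_shape[4] = {1, 1, 2, 3};
  xa_nn_elm_add_broadcast_4D_f32xf32_f32(out, out_shape, a, a_shape, b, b_shape);
  // out is now {11, 22, 33, 14, 25, 36}.
  return 0;
}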
11 changes: 8 additions & 3 deletions backends/cadence/hifi/operators/CMakeLists.txt
@@ -20,7 +20,7 @@ endif()

# ATen compliant ops that are needed to run this model.
set(_aten_ops__srcs
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_add.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_where.cpp"
@@ -36,18 +36,22 @@ set(_aten_ops__srcs
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_div.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_rsqrt.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_mul.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp"
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_softmax.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sub.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_empty.cpp"
"${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp")

add_library(aten_ops_cadence ${_aten_ops__srcs})
target_link_libraries(aten_ops_cadence PUBLIC executorch)
target_link_libraries(aten_ops_cadence PRIVATE cadence_kernels)
@@ -63,7 +67,8 @@ target_include_directories(aten_ops_cadence PUBLIC ${ROOT_DIR}/..
add_library(
custom_ops "quantized_linear_out.cpp"
"quantized_layer_norm.cpp"
"quantize_per_tensor.cpp" "dequantize_per_tensor.cpp")
"quantize_per_tensor.cpp" "dequantize_per_tensor.cpp"
"quantized_conv_out.cpp" "quantized_relu_out.cpp")
target_include_directories(custom_ops PUBLIC ${ROOT_DIR}/..
${CMAKE_BINARY_DIR}
${_common_include_directories})
110 changes: 110 additions & 0 deletions backends/cadence/hifi/operators/op_add.cpp
@@ -0,0 +1,110 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
#include "kernels.h"

namespace torch {
namespace executor {
namespace native {

#define NNLIB_MAX_DIM 4 /* nnlib broadcast kernels handle at most 4D; TODO: fall back when dim > 4 */

Tensor& add_out(
RuntimeContext& ctx,
const Tensor& a,
const Tensor& b,
const Scalar& alpha,
Tensor& out) {
(void)ctx;

ScalarType a_type = a.scalar_type();
ScalarType b_type = b.scalar_type();
ScalarType common_type = promoteTypes(a_type, b_type);
ScalarType out_type = out.scalar_type();

ET_CHECK_MSG(a_type == ScalarType::Float, "Input tensor not a float.\n");
ET_CHECK_MSG(b_type == ScalarType::Float, "Input tensor not a float.\n");
ET_CHECK_MSG(out_type == ScalarType::Float, "Output tensor not a float.\n");

ET_CHECK(canCast(common_type, out_type));

using CTYPE_A = float;
using CTYPE_B = float;
using CTYPE_IN = float;
using CTYPE_OUT = float;
CTYPE_IN alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);

if ((alpha_val == 1.0) && (out_type == ScalarType::Float)) {
  /* Fast path: float tensors with alpha == 1 go to the nnlib kernels. */
  const int a_is_broadcasted = !out.sizes().equals(a.sizes());
  const int b_is_broadcasted = !out.sizes().equals(b.sizes());
  const int broadcast = (a_is_broadcasted || b_is_broadcasted);

  const float* const a_data = a.const_data_ptr<float>();
  const float* const b_data = b.const_data_ptr<float>();
  float* const out_data = out.mutable_data_ptr<float>();

  if (broadcast == 1) {
    int out_shape[NNLIB_MAX_DIM];
    int inp1_shape[NNLIB_MAX_DIM];
    int inp2_shape[NNLIB_MAX_DIM];

    /* The nnlib broadcast kernel expects fixed 4D shapes, so left-pad
     * every shape with 1s up to NNLIB_MAX_DIM. */
    for (int i = 0; i < NNLIB_MAX_DIM; i++) {
      out_shape[i] = 1;
      inp1_shape[i] = 1;
      inp2_shape[i] = 1;
    }

    int off_o = NNLIB_MAX_DIM - out.dim();
    int off_a = NNLIB_MAX_DIM - a.dim();
    int off_b = NNLIB_MAX_DIM - b.dim();

    for (int i = 0; i < out.dim(); i++)
      out_shape[i + off_o] = out.size(i);
    for (int i = 0; i < a.dim(); i++)
      inp1_shape[i + off_a] = a.size(i);
    for (int i = 0; i < b.dim(); i++)
      inp2_shape[i + off_b] = b.size(i);

    xa_nn_elm_add_broadcast_4D_f32xf32_f32(
        out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape);
  } else {
    /* Shapes match exactly: use the flat element-wise kernel. */
    xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel());
  }
} else {
  /* Generic fallback: handles alpha != 1 via an element-wise loop. */
  apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
      [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) {
        CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
        CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
        CTYPE_IN value = a_casted + alpha_val * b_casted;

        return static_cast<CTYPE_OUT>(value);
      },
      a,
      b,
      out);
}

return out;
}

} // namespace native
} // namespace executor
} // namespace torch
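
Worth noting on dispatch: the nnlib fast path is taken only when alpha == 1.0 and every tensor is float; any other alpha falls through to the generic loop, which computes out = a + alpha * b element-wise. A minimal reference sketch of those semantics (hypothetical standalone code, mirroring the lambda above without broadcasting):

#include <cstddef>

// Reference semantics of add.out: out[i] = a[i] + alpha * b[i].
void add_ref(const float* a, const float* b, float alpha, float* out, size_t n) {
  for (size_t i = 0; i < n; i++)
    out[i] = a[i] + alpha * b[i];
}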