Commit
Adding ET_KERNEL_CHECK for allocate_temp_memory
nishpoonia committed Dec 20, 2024
1 parent 18cf518 commit b5c4583
Showing 6 changed files with 33 additions and 0 deletions.
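
Every changed file applies the same pattern: each call to kernels::allocate_temp_memory is now followed by an ET_KERNEL_CHECK that aborts the kernel with MemoryAllocationFailed when the temp allocator returns nullptr, instead of letting the downstream nnlib kernel dereference a null scratch pointer. Below is a minimal sketch of that pattern; example_op_out is a hypothetical operator, and the allocate_temp_memory and WORD32 declarations are assumptions inferred from the diffs in this commit, not verbatim backend code.

#include <executorch/runtime/kernel/kernel_includes.h>

using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

typedef int WORD32; // stands in for the xa_nnlib WORD32 typedef these operators use

// Hypothetical operator illustrating the allocate-then-check pattern.
Tensor& example_op_out(KernelRuntimeContext& ctx, const Tensor& a, Tensor& out) {
  int num_elm = a.numel();

  // allocate_temp_memory draws from the context's temp allocator and can
  // return nullptr when no scratch memory is available (signature assumed
  // from the call sites in this commit).
  WORD32* __restrict__ ptr1 =
      (WORD32* __restrict__)kernels::allocate_temp_memory(
          ctx, num_elm * sizeof(WORD32));

  // On failure, ET_KERNEL_CHECK records Error::MemoryAllocationFailed on
  // ctx and returns `out` immediately, so the body below only ever runs
  // with a valid scratch buffer.
  ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

  // ... kernel body uses ptr1 as scratch ...
  return out;
}

Because ET_KERNEL_CHECK returns `out` from the enclosing function on failure, each check must appear immediately after its allocation and before any use of the pointer, which is exactly where the diffs below place them.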
11 changes: 11 additions & 0 deletions backends/cadence/hifi/operators/op_atan2.cpp
@@ -66,10 +66,15 @@ Tensor& atan2_out(
WORD32* __restrict__ ptr1 =
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(WORD32));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ ptr2 =
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(WORD32));

ET_KERNEL_CHECK(ctx, ptr2 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ pin1 =
(WORD32* __restrict__)a.const_data_ptr<float>();
WORD32* __restrict__ pin2 =
@@ -108,9 +113,13 @@ Tensor& atan2_out(
(FLOAT32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(WORD32));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

FLOAT32* __restrict__ pin1 =
(FLOAT32* __restrict__)a.const_data_ptr<float>();

ET_KERNEL_CHECK(ctx, pin1 != nullptr, MemoryAllocationFailed, out);

WORD32 p_out_shape[kNnlibMaxDim];
WORD32 p_inp1_shape[kNnlibMaxDim];

@@ -137,6 +146,8 @@ Tensor& atan2_out(
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(WORD32));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ pin1 =
(WORD32* __restrict__)b.const_data_ptr<float>();

3 changes: 3 additions & 0 deletions backends/cadence/hifi/operators/op_clamp.cpp
@@ -359,6 +359,9 @@ Tensor& clamp_tensor_out(
ctx,
(out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]) *
sizeof(int));

ET_KERNEL_CHECK(ctx, p_scratch != nullptr, MemoryAllocationFailed, out);

const FLOAT32* p_brd_cond = (const FLOAT32*)p_scratch;
xa_nn_broadcast_32_32(
(WORD32*)p_brd_cond, out_shape, (WORD32*)inp_data, inp_shape, 4);
2 changes: 2 additions & 0 deletions backends/cadence/hifi/operators/op_mean.cpp
@@ -129,6 +129,8 @@ Tensor& mean_dim_out(
(void* __restrict__)kernels::allocate_temp_memory(
ctx, scratch_size * sizeof(int));

ET_KERNEL_CHECK(ctx, p_scratch_in != nullptr, MemoryAllocationFailed, out);

xa_nn_reduce_mean_4D_f32_f32(
p_out,
out_shape,
9 changes: 9 additions & 0 deletions backends/cadence/hifi/operators/op_pow.cpp
@@ -122,10 +122,15 @@ Tensor& pow_Tensor_Tensor_out(
WORD32* __restrict__ ptr1 =
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(int));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ ptr2 =
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(int));

ET_KERNEL_CHECK(ctx, ptr2 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ pin1 =
(WORD32* __restrict__)a.const_data_ptr<float>();
WORD32* __restrict__ pin2 =
@@ -158,6 +163,8 @@ Tensor& pow_Tensor_Tensor_out(
(FLOAT32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(int));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

FLOAT32* __restrict__ pin1 =
(FLOAT32* __restrict__)a.const_data_ptr<float>();

@@ -185,6 +192,8 @@ Tensor& pow_Tensor_Tensor_out(
(WORD32* __restrict__)kernels::allocate_temp_memory(
ctx, num_elm * sizeof(int));

ET_KERNEL_CHECK(ctx, ptr1 != nullptr, MemoryAllocationFailed, out);

WORD32* __restrict__ pin1 =
(WORD32* __restrict__)b.const_data_ptr<float>();

5 changes: 5 additions & 0 deletions backends/cadence/hifi/operators/op_softmax.cpp
@@ -101,9 +101,14 @@ Tensor& softmax_out(

int* p_out =
(int*)kernels::allocate_temp_memory(ctx, out.numel() * sizeof(int));

ET_KERNEL_CHECK(ctx, p_out != nullptr, MemoryAllocationFailed, out);

int* p_out1 =
(int*)kernels::allocate_temp_memory(ctx, out.numel() * sizeof(int));

ET_KERNEL_CHECK(ctx, p_out1 != nullptr, MemoryAllocationFailed, out);

WORD32 ret_val = xa_nn_transpose_32_32(
p_out,
p_out_shape,
3 changes: 3 additions & 0 deletions backends/cadence/hifi/operators/op_where.cpp
@@ -113,6 +113,9 @@ Tensor& where_out(
ctx,
(out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3]) *
sizeof(int));

ET_KERNEL_CHECK(ctx, p_scratch != nullptr, MemoryAllocationFailed, out);

const unsigned char* p_brd_cond = (const unsigned char*)p_scratch;
xa_nn_broadcast_8_8(
(WORD8* __restrict__)p_brd_cond,
