Commit

Code cleanup
Rushi-cad committed Nov 5, 2024
1 parent e012bac commit 2f2e6fd
Showing 5 changed files with 18 additions and 34 deletions.
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_atan2.cpp
@@ -46,14 +46,14 @@ Tensor& atan2_out(

const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
- const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+ const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
max_dim = out.dim() > max_dim ? out.dim() : max_dim;

if (out_type != ScalarType::Float)
optimized = 0;

- if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+ if (max_dim > kNnlibMaxDim)
optimized = 0;

WORD32 num_elm = out.numel();
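The new guard above disables the NNLib fast path whenever the tensor rank exceeds kNnlibMaxDim, while the removed line only did so when broadcasting was also required. A minimal, self-contained sketch of that gating follows; kNnlibMaxDim = 4 and the simplified ScalarType enum are assumptions made for illustration, not taken from the backend headers.

// Sketch of the fast-path gating after this change (illustrative only).
#include <cstdio>

constexpr int kNnlibMaxDim = 4; // assumed value, for illustration

enum class ScalarType { Float, Bool };

// Returns true when the NNLib-optimized kernel may be used.
bool use_nnlib_path(ScalarType out_type, int max_dim) {
  bool optimized = true;
  if (out_type != ScalarType::Float)
    optimized = false;
  if (max_dim > kNnlibMaxDim) // the old guard also required broadcast here
    optimized = false;
  return optimized;
}

int main() {
  // A rank-5 float op now takes the portable fallback even without
  // broadcasting; under the old guard it would have stayed on the NNLib path.
  std::printf("%d\n", use_nnlib_path(ScalarType::Float, 5)); // prints 0
}

The bitwise_and, bitwise_or, and bitwise_xor changes below follow the same pattern, with ScalarType::Bool as the supported output type.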
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_bitwise_and.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_and_Tensor_out(

const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
- const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+ const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
max_dim = out.dim() > max_dim ? out.dim() : max_dim;

if (out_type != ScalarType::Bool)
optimized = 0;

- if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+ if (max_dim > kNnlibMaxDim)
optimized = 0;

WORD32 num_elm = out.numel();
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_bitwise_or.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_or_Tensor_out(

const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
- const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+ const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
max_dim = out.dim() > max_dim ? out.dim() : max_dim;

if (out_type != ScalarType::Bool)
optimized = 0;

- if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+ if (max_dim > kNnlibMaxDim)
optimized = 0;

WORD32 num_elm = out.numel();
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_bitwise_xor.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_xor_Tensor_out(

const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
- const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+ const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
max_dim = out.dim() > max_dim ? out.dim() : max_dim;

if (out_type != ScalarType::Bool)
optimized = 0;

- if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+ if (max_dim > kNnlibMaxDim)
optimized = 0;

WORD32 num_elm = out.numel();
36 changes: 10 additions & 26 deletions backends/cadence/hifi/operators/op_pow.cpp
@@ -101,14 +101,14 @@ Tensor& pow_Tensor_Tensor_out(

const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
- const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+ const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
max_dim = out.dim() > max_dim ? out.dim() : max_dim;

if (out_type != ScalarType::Float)
optimized = 0;

- if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+ if (max_dim > kNnlibMaxDim)
optimized = 0;

WORD32 num_elm = out.numel();
@@ -156,21 +156,13 @@ Tensor& pow_Tensor_Tensor_out(
FLOAT32* __restrict__ pin1 =
(FLOAT32* __restrict__)a.const_data_ptr<float>();

- WORD32 p_out_shape[max_dim];
- WORD32 p_inp1_shape[max_dim];
-
- for (int i = 0; i < max_dim; i++) {
- p_inp1_shape[i] = 1;
- p_out_shape[i] = 1;
- }
-
- int off_o = max_dim - out_dim;
- int off_a = max_dim - a_dim;
+ WORD32 p_out_shape[kNnlibMaxDim];
+ WORD32 p_inp1_shape[kNnlibMaxDim];

for (int i = 0; i < out_dim; i++)
- p_out_shape[i + off_o] = out.size(i);
+ p_out_shape[i] = out.size(i);
for (int i = 0; i < a_dim; i++)
- p_inp1_shape[i + off_a] = a.size(i);
+ p_inp1_shape[i] = a.size(i);

xa_nn_broadcast_32_32(
(WORD32*)ptr1, p_out_shape, (WORD32*)pin1, p_inp1_shape, out_dim);
@@ -191,21 +183,13 @@ Tensor& pow_Tensor_Tensor_out(
WORD32* __restrict__ pin1 =
(WORD32* __restrict__)b.const_data_ptr<float>();

- WORD32 p_out_shape[max_dim];
- WORD32 p_inp1_shape[max_dim];
-
- for (int i = 0; i < max_dim; i++) {
- p_inp1_shape[i] = 1;
- p_out_shape[i] = 1;
- }
-
- int off_o = max_dim - out_dim;
- int off_b = max_dim - b_dim;
+ WORD32 p_out_shape[kNnlibMaxDim];
+ WORD32 p_inp1_shape[kNnlibMaxDim];

for (int i = 0; i < out_dim; i++)
- p_out_shape[i + off_o] = out.size(i);
+ p_out_shape[i] = out.size(i);
for (int i = 0; i < b_dim; i++)
- p_inp1_shape[i + off_b] = b.size(i);
+ p_inp1_shape[i] = b.size(i);

xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim);

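In op_pow.cpp the shape arrays handed to xa_nn_broadcast_32_32 are now fixed kNnlibMaxDim-sized buffers filled from index 0, replacing the max_dim-sized arrays that were initialised to 1 and right-aligned via the off_o/off_a (or off_b) offsets. A small self-contained sketch of the new setup follows; WORD32 modelled as int32_t and kNnlibMaxDim = 4 are assumptions for illustration, and the NNLib call itself is left as a comment.

// Sketch of the simplified shape setup preceding the NNLib broadcast call.
#include <cstddef>
#include <cstdint>
#include <vector>

using WORD32 = std::int32_t;    // stand-in for NNLib's WORD32
constexpr int kNnlibMaxDim = 4; // assumed rank limit

void fill_nnlib_shapes(
    const std::vector<WORD32>& out_sizes, // out.size(0..out_dim-1)
    const std::vector<WORD32>& in_sizes,  // a.size(...) or b.size(...)
    WORD32 (&p_out_shape)[kNnlibMaxDim],
    WORD32 (&p_inp1_shape)[kNnlibMaxDim]) {
  // Sizes are written from index 0; the removed init-to-1 loop and the
  // right-alignment offsets are no longer needed.
  for (std::size_t i = 0; i < out_sizes.size(); ++i)
    p_out_shape[i] = out_sizes[i];
  for (std::size_t i = 0; i < in_sizes.size(); ++i)
    p_inp1_shape[i] = in_sizes[i];
  // The kernel then calls, as in the diff above:
  // xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim);
}

This stays in bounds because the guard earlier in the function already forces the portable fallback whenever max_dim exceeds kNnlibMaxDim.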
