From 2f2e6fdc2d7bc8fd0279dc9dbf16003b798e5b61 Mon Sep 17 00:00:00 2001
From: Rushi-cad
Date: Tue, 5 Nov 2024 06:23:30 -0800
Subject: [PATCH] Code cleanup

---
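Notes (for reviewers; not part of the commit message):

The cleanup does two things. First, in all five operators the NNLib
fast path is now skipped whenever the tensor rank exceeds kNnlibMaxDim,
independent of broadcasting, and `broadcast` now flags the case where
both inputs are broadcast rather than either one. Second, in op_pow the
variable-length shape arrays (sized max_dim and padded with leading 1s
via the off_o/off_a/off_b offsets) become fixed kNnlibMaxDim-sized
arrays filled left-aligned; the rank guard above is presumably what
makes the fixed size sufficient.

Below is a minimal standalone sketch of the new shape-filling scheme.
The example sizes, the kNnlibMaxDim value, and the stub main() are
illustrative assumptions, not taken from the tree; only the two fill
loops mirror the patch.

    #include <cstdio>

    typedef int WORD32;         // NNLib's 32-bit integer type
    enum { kNnlibMaxDim = 4 };  // assumed limit, for illustration only

    int main() {
      // Stand-ins for out.size(i) and a.size(i); dim 0 broadcasts.
      const int out_dim = 3;
      const int a_dim = 3;
      const WORD32 out_sizes[] = {2, 3, 4};
      const WORD32 a_sizes[] = {1, 3, 4};

      // New scheme: fixed-size arrays, shapes copied left-aligned.
      // The fast path only runs when max_dim <= kNnlibMaxDim, and the
      // broadcast call below passes out_dim as the rank, so only the
      // leading entries are meaningful.
      WORD32 p_out_shape[kNnlibMaxDim];
      WORD32 p_inp1_shape[kNnlibMaxDim];
      for (int i = 0; i < out_dim; i++)
        p_out_shape[i] = out_sizes[i];
      for (int i = 0; i < a_dim; i++)
        p_inp1_shape[i] = a_sizes[i];

      // In the patch, xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1,
      // p_inp1_shape, out_dim) consumes these arrays at this point.
      for (int i = 0; i < out_dim; i++)
        printf("inp %d -> out %d\n", p_inp1_shape[i], p_out_shape[i]);
      return 0;
    }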
 backends/cadence/hifi/operators/op_atan2.cpp  |  4 +--
 .../cadence/hifi/operators/op_bitwise_and.cpp |  4 +--
 .../cadence/hifi/operators/op_bitwise_or.cpp  |  4 +--
 .../cadence/hifi/operators/op_bitwise_xor.cpp |  4 +--
 backends/cadence/hifi/operators/op_pow.cpp    | 36 ++++++-------------
 5 files changed, 18 insertions(+), 34 deletions(-)

diff --git a/backends/cadence/hifi/operators/op_atan2.cpp b/backends/cadence/hifi/operators/op_atan2.cpp
index 5d6f7c360a..27a209c03f 100644
--- a/backends/cadence/hifi/operators/op_atan2.cpp
+++ b/backends/cadence/hifi/operators/op_atan2.cpp
@@ -46,14 +46,14 @@ Tensor& atan2_out(
 
   const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
   const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
-  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ? out.dim() : max_dim;
 
   if (out_type != ScalarType::Float)
     optimized = 0;
 
-  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+  if (max_dim > kNnlibMaxDim)
     optimized = 0;
 
   WORD32 num_elm = out.numel();
diff --git a/backends/cadence/hifi/operators/op_bitwise_and.cpp b/backends/cadence/hifi/operators/op_bitwise_and.cpp
index b6c726d349..3d5a07abeb 100644
--- a/backends/cadence/hifi/operators/op_bitwise_and.cpp
+++ b/backends/cadence/hifi/operators/op_bitwise_and.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_and_Tensor_out(
 
   const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
   const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
-  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ? out.dim() : max_dim;
 
   if (out_type != ScalarType::Bool)
     optimized = 0;
 
-  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+  if (max_dim > kNnlibMaxDim)
     optimized = 0;
 
   WORD32 num_elm = out.numel();
diff --git a/backends/cadence/hifi/operators/op_bitwise_or.cpp b/backends/cadence/hifi/operators/op_bitwise_or.cpp
index 9a51dc1b05..c51b37cab4 100644
--- a/backends/cadence/hifi/operators/op_bitwise_or.cpp
+++ b/backends/cadence/hifi/operators/op_bitwise_or.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_or_Tensor_out(
 
   const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
   const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
-  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ? out.dim() : max_dim;
 
   if (out_type != ScalarType::Bool)
     optimized = 0;
 
-  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+  if (max_dim > kNnlibMaxDim)
     optimized = 0;
 
   WORD32 num_elm = out.numel();
diff --git a/backends/cadence/hifi/operators/op_bitwise_xor.cpp b/backends/cadence/hifi/operators/op_bitwise_xor.cpp
index 8539aaf99d..269cb1a24f 100644
--- a/backends/cadence/hifi/operators/op_bitwise_xor.cpp
+++ b/backends/cadence/hifi/operators/op_bitwise_xor.cpp
@@ -55,14 +55,14 @@ Tensor& bitwise_xor_Tensor_out(
 
   const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
   const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
-  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ? out.dim() : max_dim;
 
   if (out_type != ScalarType::Bool)
     optimized = 0;
 
-  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+  if (max_dim > kNnlibMaxDim)
     optimized = 0;
 
   WORD32 num_elm = out.numel();
diff --git a/backends/cadence/hifi/operators/op_pow.cpp b/backends/cadence/hifi/operators/op_pow.cpp
index c7dda1c2a0..a20af645a1 100644
--- a/backends/cadence/hifi/operators/op_pow.cpp
+++ b/backends/cadence/hifi/operators/op_pow.cpp
@@ -101,14 +101,14 @@ Tensor& pow_Tensor_Tensor_out(
 
   const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
   const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
-  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
+  const bool broadcast = (a_is_broadcasted && b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ? out.dim() : max_dim;
 
   if (out_type != ScalarType::Float)
     optimized = 0;
 
-  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
+  if (max_dim > kNnlibMaxDim)
     optimized = 0;
 
   WORD32 num_elm = out.numel();
@@ -156,21 +156,13 @@ Tensor& pow_Tensor_Tensor_out(
 
       FLOAT32* __restrict__ pin1 =
           (FLOAT32* __restrict__)a.const_data_ptr();
 
-      WORD32 p_out_shape[max_dim];
-      WORD32 p_inp1_shape[max_dim];
-
-      for (int i = 0; i < max_dim; i++) {
-        p_inp1_shape[i] = 1;
-        p_out_shape[i] = 1;
-      }
-
-      int off_o = max_dim - out_dim;
-      int off_a = max_dim - a_dim;
+      WORD32 p_out_shape[kNnlibMaxDim];
+      WORD32 p_inp1_shape[kNnlibMaxDim];
 
       for (int i = 0; i < out_dim; i++)
-        p_out_shape[i + off_o] = out.size(i);
+        p_out_shape[i] = out.size(i);
       for (int i = 0; i < a_dim; i++)
-        p_inp1_shape[i + off_a] = a.size(i);
+        p_inp1_shape[i] = a.size(i);
 
       xa_nn_broadcast_32_32(
           (WORD32*)ptr1, p_out_shape, (WORD32*)pin1, p_inp1_shape, out_dim);
@@ -191,21 +183,13 @@ Tensor& pow_Tensor_Tensor_out(
 
       WORD32* __restrict__ pin1 =
           (WORD32* __restrict__)b.const_data_ptr();
 
-      WORD32 p_out_shape[max_dim];
-      WORD32 p_inp1_shape[max_dim];
-
-      for (int i = 0; i < max_dim; i++) {
-        p_inp1_shape[i] = 1;
-        p_out_shape[i] = 1;
-      }
-
-      int off_o = max_dim - out_dim;
-      int off_b = max_dim - b_dim;
+      WORD32 p_out_shape[kNnlibMaxDim];
+      WORD32 p_inp1_shape[kNnlibMaxDim];
 
       for (int i = 0; i < out_dim; i++)
-        p_out_shape[i + off_o] = out.size(i);
+        p_out_shape[i] = out.size(i);
       for (int i = 0; i < b_dim; i++)
-        p_inp1_shape[i + off_b] = b.size(i);
+        p_inp1_shape[i] = b.size(i);
 
       xa_nn_broadcast_32_32(ptr1, p_out_shape, pin1, p_inp1_shape, out_dim);