[X86] isShuffleFoldableLoad - only check that the SDValue has one use #126900
Conversation
We don't need the entire load node to have one use, just the loaded value. This prevents load chains from interfering with shuffle commutation.
@llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes

We don't need the entire load node to have one use, just the loaded value. This prevents load chains from interfering with shuffle commutation.

Patch is 26.92 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/126900.diff

6 Files Affected:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 77c426f214675..f84fe118662fb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -12480,7 +12480,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
- return V->hasOneUse() &&
+ return V.hasOneUse() &&
ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
}
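
For context: the one-character change swaps SDNode::hasOneUse() (reached through `V->`), which counts uses of every result the node produces, for SDValue::hasOneUse() (reached through `V.`), which counts uses of one specific result. A load node produces two results, the loaded value and a chain; when several loads are linked through the chain, the chain use alone pushes the node past one use, so the old check rejected the load and shuffle lowering would not commute operands to fold it. Below is a minimal standalone sketch of the distinction, using a toy node model rather than LLVM's real classes (all names are illustrative):

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// Toy model, not LLVM's SDNode/SDValue: a node produces several result
// values, and every use records which result number it consumes.
struct Node {
  std::vector<std::pair<const Node *, unsigned>> Uses; // (user, result #)

  // Analogue of SDNode::hasOneUse(): exactly one use of *any* result.
  bool nodeHasOneUse() const { return Uses.size() == 1; }

  // Analogue of SDValue(N, ResNo).hasOneUse(): exactly one use of *this* result.
  bool valueHasOneUse(unsigned ResNo) const {
    unsigned Count = 0;
    for (const auto &U : Uses)
      Count += (U.second == ResNo);
    return Count == 1;
  }
};

int main() {
  // A load yields result 0 (the loaded value) and result 1 (the chain).
  Node Shuffle, NextLoad, Load;
  Load.Uses = {{&Shuffle, 0},   // the shuffle consumes the loaded value
               {&NextLoad, 1}}; // a chained load consumes the chain

  std::printf("node  hasOneUse: %d\n", Load.nodeHasOneUse());   // 0: chain use counted
  std::printf("value hasOneUse: %d\n", Load.valueHasOneUse(0)); // 1: value has one user
  return 0;
}
```

The test diffs below show the payoff of the value-level check: shuffle operands now commute so the load folds directly into the vperm* instruction, dropping the separate vmovdqa of the memory operand.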
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index e66dd426caa12..16f0614743463 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1665,10 +1665,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512F-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512F-FAST: # %bb.0:
-; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512F-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512F-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512F-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512F-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
@@ -1684,10 +1683,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512DQ-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512DQ-FAST: # %bb.0:
-; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512DQ-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512DQ-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512DQ-FAST-NEXT: vzeroupper
; AVX512DQ-FAST-NEXT: retq
@@ -1703,10 +1701,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512BW-FAST: # %bb.0:
-; AVX512BW-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512BW-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512BW-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512BW-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-FAST-NEXT: vzeroupper
; AVX512BW-FAST-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index ae3e5445bf266..d83a61e18d1ab 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -201,8 +201,8 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
; AVX512-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
-; AVX512-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,1,0,0]
+; AVX512-FCP-NEXT: vpermi2d %xmm4, %xmm5, %xmm6
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
; AVX512-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
; AVX512-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
@@ -260,8 +260,8 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
; AVX512DQ-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
-; AVX512DQ-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,1,0,0]
+; AVX512DQ-FCP-NEXT: vpermi2d %xmm4, %xmm5, %xmm6
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
; AVX512DQ-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
@@ -319,8 +319,8 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
; AVX512BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
-; AVX512BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,1,0,0]
+; AVX512BW-FCP-NEXT: vpermi2d %xmm4, %xmm5, %xmm6
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
; AVX512BW-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
; AVX512BW-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
@@ -378,8 +378,8 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %xmm5
; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [3,5,0,0]
-; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm5, %xmm4, %xmm6
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [7,1,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpermi2d %xmm4, %xmm5, %xmm6
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm4 = [4,2,0,0]
; AVX512DQ-BW-FCP-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-BW-FCP-NEXT: vpermps %ymm1, %ymm4, %ymm4
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 00af58544e25c..e4eeaeb3e1a6d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -403,13 +403,12 @@ define void @PR39483() {
;
; X86-AVX512-LABEL: PR39483:
; X86-AVX512: # %bb.0: # %entry
-; X86-AVX512-NEXT: vmovups 0, %zmm0
-; X86-AVX512-NEXT: vmovups 64, %ymm1
-; X86-AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,8,11,14,17,20,23]
-; X86-AVX512-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2
-; X86-AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; X86-AVX512-NEXT: vmulps %ymm0, %ymm2, %ymm1
-; X86-AVX512-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; X86-AVX512-NEXT: vmovups 64, %ymm0
+; X86-AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [18,21,24,27,30,1,4,7]
+; X86-AVX512-NEXT: vpermt2ps 0, %zmm1, %zmm0
+; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX512-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X86-AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-AVX512-NEXT: vmovups %ymm0, (%eax)
;
; X64-AVX1-LABEL: PR39483:
@@ -444,13 +443,12 @@ define void @PR39483() {
;
; X64-AVX512-LABEL: PR39483:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: vmovups 0, %zmm0
-; X64-AVX512-NEXT: vmovups 64, %ymm1
-; X64-AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [2,5,8,11,14,17,20,23]
-; X64-AVX512-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2
-; X64-AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; X64-AVX512-NEXT: vmulps %ymm0, %ymm2, %ymm1
-; X64-AVX512-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; X64-AVX512-NEXT: vmovups 64, %ymm0
+; X64-AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [18,21,24,27,30,1,4,7]
+; X64-AVX512-NEXT: vpermt2ps 0, %zmm1, %zmm0
+; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X64-AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vmovups %ymm0, (%rax)
entry:
%wide.vec = load <24 x float>, ptr null, align 4
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index d07b7b574eba7..17315c436188a 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -377,14 +377,14 @@ define void @vselect_concat_splat() {
; AVX512-NEXT: vmovaps %ymm2, %ymm3
; AVX512-NEXT: vpermi2ps %ymm1, %ymm0, %ymm3
; AVX512-NEXT: vmovups 32, %xmm4
-; AVX512-NEXT: vmovups 0, %ymm5
-; AVX512-NEXT: vxorps %xmm6, %xmm6, %xmm6
-; AVX512-NEXT: vcmpneqps %xmm6, %xmm3, %k0
+; AVX512-NEXT: vxorps %xmm5, %xmm5, %xmm5
+; AVX512-NEXT: vcmpneqps %xmm5, %xmm3, %k0
; AVX512-NEXT: kshiftlw $4, %k0, %k1
; AVX512-NEXT: korw %k1, %k0, %k1
-; AVX512-NEXT: vpermt2ps %ymm4, %ymm2, %ymm5
; AVX512-NEXT: vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512-NEXT: vmovaps %ymm5, %ymm0 {%k1}
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,11,14,1,9,12,15,2]
+; AVX512-NEXT: vpermi2ps 0, %ymm4, %ymm1
+; AVX512-NEXT: vmovaps %ymm1, %ymm0 {%k1}
; AVX512-NEXT: vmovups %ymm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 8cd114b807ea1..5ba2257e2b49e 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1665,10 +1665,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512F-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512F-FAST: # %bb.0:
-; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512F-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512F-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512F-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512F-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
@@ -1684,10 +1683,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512DQ-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512DQ-FAST: # %bb.0:
-; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512DQ-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512DQ-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX512DQ-FAST-NEXT: vzeroupper
; AVX512DQ-FAST-NEXT: retq
@@ -1703,10 +1701,9 @@ define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %i
;
; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2:
; AVX512BW-FAST: # %bb.0:
-; AVX512BW-FAST-NEXT: vmovdqa 32(%rdi), %ymm0
-; AVX512BW-FAST-NEXT: vpmovsxbd {{.*#+}} ymm1 = [8,1,2,3,8,5,6,7]
-; AVX512BW-FAST-NEXT: vpermi2d (%rdi), %ymm0, %ymm1
-; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm1, %zmm0
+; AVX512BW-FAST-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,9,10,11,0,13,14,15]
+; AVX512BW-FAST-NEXT: vpermd (%rdi), %zmm0, %zmm0
+; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0
; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-FAST-NEXT: vzeroupper
; AVX512BW-FAST-NEXT: retq
@@ -2941,11 +2938,10 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
;
; AVX512BW-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [32,57,32,59,32,61,32,63,32,9,32,11,32,13,32,15,32,17,32,19,32,21,32,23,0,0,0,0,0,0,0,0]
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm2 = [0,25,0,27,0,29,0,31,0,41,0,43,0,45,0,47,0,49,0,51,0,53,0,55,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm2, %zmm0
+; AVX512BW-NEXT: vpermt2w (%rdi), %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3096,11 +3092,10 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
;
; AVX512BW-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [32,57,58,32,60,61,32,63,8,32,10,11,32,13,14,32,16,17,32,19,20,32,22,23,0,0,0,0,0,0,0,0]
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm2 = [0,25,26,0,28,29,0,31,40,0,42,43,0,45,46,0,48,49,0,51,52,0,54,55,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm2, %zmm0
+; AVX512BW-NEXT: vpermt2w (%rdi), %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3272,11 +3267,10 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512BW-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm0 = [32,57,58,59,32,61,62,63,32,9,10,11,32,13,14,15,32,17,18,19,32,21,22,23,0,0,0,0,0,0,0,0]
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm2 = [0,25,26,27,0,29,30,31,0,41,42,43,0,45,46,47,0,49,50,51,0,53,54,55,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm2, %zmm0
+; AVX512BW-NEXT: vpermt2w (%rdi), %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3786,13 +3780,12 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512F-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,16,31,16,5,16,7,16,9,16,11,0,0,0,0]
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,0,15,0,21,0,23,0,25,0,27,0,0,0,0]
-; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0
+; AVX512F-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm2, %ymm1
+; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
; AVX512F-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdx)
; AVX512F-NEXT: vzeroupper
@@ -3800,13 +3793,12 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512DQ-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,16,31,16,5,16,7,16,9,16,11,0,0,0,0]
; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,0,15,0,21,0,23,0,25,0,27,0,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0
+; AVX512DQ-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm2, %ymm1
+; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rdx)
; AVX512DQ-NEXT: vzeroupper
@@ -3814,11 +3806,10 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512BW-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,16,31,16,5,16,7,16,9,16,11,0,0,0,0]
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,0,15,0,21,0,23,0,25,0,27,0,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm2, %zmm0
+; AVX512BW-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3940,13 +3931,12 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
;
; AVX512F-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,30,16,4,5,16,7,8,16,10,11,0,0,0,0]
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,14,0,20,21,0,23,24,0,26,27,0,0,0,0]
-; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0
+; AVX512F-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm2, %ymm1
+; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
; AVX512F-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdx)
; AVX512F-NEXT: vzeroupper
@@ -3954,13 +3944,12 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
;
; AVX512DQ-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,30,16,4,5,16,7,8,16,10,11,0,0,0,0]
; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,14,0,20,21,0,23,24,0,26,27,0,0,0,0]
-; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0
+; AVX512DQ-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm2, %ymm1
+; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rdx)
; AVX512DQ-NEXT: vzeroupper
@@ -3968,11 +3957,10 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
;
; AVX512BW-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,30,16,4,5,16,7,8,16,10,11,0,0,0,0]
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,14,0,20,21,0,23,24,0,26,27,0,0,0,0]
-; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpaddb (%rsi), %zmm2, %zmm0
+; AVX512BW-NEXT: vpermt2d (%rdi), %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb (%rsi), %zmm1, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -4052,13 +4040,12 @@ define void @vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3(ptr %i
;
; AVX512F-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm0 = [16,29,30,31,16,5,6,7,16,9,10,11,0,0,0,0]
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,13,...
[truncated]
LGTM.