[X86] Add masked variable tests for funnel undef/zero argument combines
I've avoided 'modulo' masks (e.g. `and %amt, 31`), as we'll handle those via SimplifyDemandedBits in the future; these tests only need to check that the shift amount is known to be 'in range'.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@353644 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/funnel-shift.ll b/test/CodeGen/X86/funnel-shift.ll
index 55c1b3c..531885d 100644
--- a/test/CodeGen/X86/funnel-shift.ll
+++ b/test/CodeGen/X86/funnel-shift.ll
@@ -378,6 +378,28 @@
ret i32 %res
}
+define i32 @fshl_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
+; X32-SSE2-LABEL: fshl_i32_undef0_msk:
+; X32-SSE2: # %bb.0:
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE2-NEXT: andl $7, %ecx
+; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X32-SSE2-NEXT: shldl %cl, %eax, %eax
+; X32-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshl_i32_undef0_msk:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: movl %esi, %ecx
+; X64-AVX2-NEXT: andl $7, %ecx
+; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX2-NEXT: shldl %cl, %edi, %eax
+; X64-AVX2-NEXT: retq
+ %m = and i32 %a1, 7
+ %res = call i32 @llvm.fshl.i32(i32 undef, i32 %a0, i32 %m)
+ ret i32 %res
+}
+
define i32 @fshl_i32_undef0_cst(i32 %a0) nounwind {
; X32-SSE2-LABEL: fshl_i32_undef0_cst:
; X32-SSE2: # %bb.0:
@@ -412,6 +434,29 @@
ret i32 %res
}
+define i32 @fshl_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
+; X32-SSE2-LABEL: fshl_i32_undef1_msk:
+; X32-SSE2: # %bb.0:
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE2-NEXT: andl $7, %ecx
+; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X32-SSE2-NEXT: shldl %cl, %eax, %eax
+; X32-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshl_i32_undef1_msk:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: movl %esi, %ecx
+; X64-AVX2-NEXT: movl %edi, %eax
+; X64-AVX2-NEXT: andl $7, %ecx
+; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX2-NEXT: shldl %cl, %eax, %eax
+; X64-AVX2-NEXT: retq
+ %m = and i32 %a1, 7
+ %res = call i32 @llvm.fshl.i32(i32 %a0, i32 undef, i32 %m)
+ ret i32 %res
+}
+
define i32 @fshl_i32_undef1_cst(i32 %a0) nounwind {
; X32-SSE2-LABEL: fshl_i32_undef1_cst:
; X32-SSE2: # %bb.0:
@@ -464,6 +509,29 @@
ret i32 %res
}
+define i32 @fshr_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
+; X32-SSE2-LABEL: fshr_i32_undef0_msk:
+; X32-SSE2: # %bb.0:
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE2-NEXT: andl $7, %ecx
+; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X32-SSE2-NEXT: shrdl %cl, %eax, %eax
+; X32-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshr_i32_undef0_msk:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: movl %esi, %ecx
+; X64-AVX2-NEXT: movl %edi, %eax
+; X64-AVX2-NEXT: andl $7, %ecx
+; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX2-NEXT: shrdl %cl, %eax, %eax
+; X64-AVX2-NEXT: retq
+ %m = and i32 %a1, 7
+ %res = call i32 @llvm.fshr.i32(i32 undef, i32 %a0, i32 %m)
+ ret i32 %res
+}
+
define i32 @fshr_i32_undef0_cst(i32 %a0) nounwind {
; X32-SSE2-LABEL: fshr_i32_undef0_cst:
; X32-SSE2: # %bb.0:
@@ -498,6 +566,28 @@
ret i32 %res
}
+define i32 @fshr_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
+; X32-SSE2-LABEL: fshr_i32_undef1_msk:
+; X32-SSE2: # %bb.0:
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE2-NEXT: andl $7, %ecx
+; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X32-SSE2-NEXT: shrdl %cl, %eax, %eax
+; X32-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshr_i32_undef1_msk:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: movl %esi, %ecx
+; X64-AVX2-NEXT: andl $7, %ecx
+; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX2-NEXT: retq
+ %m = and i32 %a1, 7
+ %res = call i32 @llvm.fshr.i32(i32 %a0, i32 undef, i32 %m)
+ ret i32 %res
+}
+
define i32 @fshr_i32_undef1_cst(i32 %a0) nounwind {
; X32-SSE2-LABEL: fshr_i32_undef1_cst:
; X32-SSE2: # %bb.0: