[x86] adjust test constants to maintain coverage; NFC
Increment (add 1) could be transformed to sub -1, and we'd lose coverage for these patterns.
llvm-svn: 305646
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
index ba47e2b..971d03a 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -653,7 +653,7 @@
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
- %g = add <8 x i32> %in, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %g = add <8 x i32> %in, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
ret <8 x i32> %g
}
diff --git a/llvm/test/CodeGen/X86/avx512-arith.ll b/llvm/test/CodeGen/X86/avx512-arith.ll
index 26be208..d96b588 100644
--- a/llvm/test/CodeGen/X86/avx512-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512-arith.ll
@@ -348,7 +348,7 @@
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <8 x i64> %i, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
ret <8 x i64> %x
}
@@ -394,7 +394,7 @@
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
ret <16 x i32> %x
}
@@ -446,7 +446,7 @@
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
ret <16 x i32> %r
}
@@ -473,7 +473,7 @@
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
ret <16 x i32> %r
}
diff --git a/llvm/test/CodeGen/X86/avx512-logic.ll b/llvm/test/CodeGen/X86/avx512-logic.ll
index 7153c1f..6e08753 100644
--- a/llvm/test/CodeGen/X86/avx512-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512-logic.ll
@@ -11,8 +11,8 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2,
+ i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -25,8 +25,8 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3,
+ i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <16 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1,
i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <16 x i32> %a2, %b2
@@ -41,8 +41,8 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4,
+ i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -55,8 +55,8 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5,
+ i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -69,7 +69,7 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6>
%x = and <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -82,7 +82,7 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%b2 = xor <8 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <8 x i64> %a2, %b2
ret <8 x i64> %x
@@ -96,7 +96,7 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
%x = or <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -109,7 +109,7 @@
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9>
%x = xor <8 x i64> %a2, %b
ret <8 x i64> %x
}
diff --git a/llvm/test/CodeGen/X86/avx512vl-logic.ll b/llvm/test/CodeGen/X86/avx512vl-logic.ll
index 83fa8d4..6e697cf 100644
--- a/llvm/test/CodeGen/X86/avx512vl-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-logic.ll
@@ -12,7 +12,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -25,7 +25,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <8 x i32> %a2, %b2
ret <8 x i32> %x
@@ -39,7 +39,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -52,7 +52,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -65,7 +65,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 6, i64 6, i64 6, i64 6>
%x = and <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -78,7 +78,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
%b2 = xor <4 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <4 x i64> %a2, %b2
ret <4 x i64> %x
@@ -92,7 +92,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 21, i64 21, i64 21, i64 21>
%x = or <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -105,7 +105,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 22, i64 22, i64 22, i64 22>
%x = xor <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -120,7 +120,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8>
%x = and <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -133,7 +133,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
%b2 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <4 x i32> %a2, %b2
ret <4 x i32> %x
@@ -147,7 +147,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 10, i32 10, i32 10, i32 10>
%x = or <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -160,7 +160,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 11, i32 11, i32 11, i32 11>
%x = xor <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -173,7 +173,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 12, i64 12>
%x = and <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -186,7 +186,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 13, i64 13>
%b2 = xor <2 x i64> %b, <i64 -1, i64 -1>
%x = and <2 x i64> %a2, %b2
ret <2 x i64> %x
@@ -200,7 +200,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 14, i64 14>
%x = or <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -213,7 +213,7 @@
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 15, i64 15>
%x = xor <2 x i64> %a2, %b
ret <2 x i64> %x
}