[X86] Add test cases showing failure to fold a global variable address into the gather addressing mode when using the target-specific intrinsics. NFC
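
The target-specific gather intrinsics currently materialize the global's
address into a register (movl $x, %eax) and use a register base for the
gather. If the fold worked, the address could presumably appear directly as
a displacement in the memory operand, roughly along these lines (a sketch of
the hoped-for output, not what is generated today):

  vgatherqps %xmm2, x(,%ymm0,4), %xmm1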

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@351522 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index a7da2fc..9983e7d 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -145,3 +145,30 @@
   %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
   ret <2 x double> %res
 }
+
+@x = global [1024 x float] zeroinitializer, align 16
+
+define <4 x float> @gather_global(<4 x i64>, i32* nocapture readnone) {
+; X32-LABEL: gather_global:
+; X32:       # %bb.0:
+; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT:    movl $x, %eax
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vgatherqps %xmm2, (%eax,%ymm0,4), %xmm1
+; X32-NEXT:    vmovaps %xmm1, %xmm0
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
+;
+; X64-LABEL: gather_global:
+; X64:       # %bb.0:
+; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT:    movl $x, %eax
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vgatherqps %xmm2, (%rax,%ymm0,4), %xmm1
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+  %3 = tail call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, i8* bitcast ([1024 x float]* @x to i8*), <4 x i64> %0, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 4)
+  ret <4 x float> %3
+}
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8)
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 7228a12..038abca 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -875,6 +875,21 @@
   ret <16 x float> %res6
 }
 
+@x = global [1024 x float] zeroinitializer, align 16
+
+define <8 x float> @gather_global(<8 x i64>, i32* nocapture readnone) {
+; CHECK-LABEL: gather_global:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    movl $x, %eax
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vgatherqps (%rax,%zmm0,4), %ymm1 {%k1}
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %3 = tail call <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float> zeroinitializer, i8* bitcast ([1024 x float]* @x to i8*), <8 x i64> %0, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i32 4)
+  ret <8 x float> %3
+}
+
 declare <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float>, i8*, <16 x i32>, <16 x i1>, i32)
 declare <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(<8 x double>, i8*, <8 x i32>, <8 x i1>, i32)
 declare <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float>, i8*, <8 x i64>, <8 x i1>, i32)