[X86] Remove tests for non-existent intrinsics. NFC (#196237)

There is no PSRAQ instruction until AVX512. The incorrect intrinsic
names were just being interpreted as calls to an external function.
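
For reference, a 64-bit arithmetic shift right needs no target intrinsic at
all; it is expressible with generic IR. A minimal sketch (the function name
@ashr_v2i64 is just illustrative):

    define <2 x i64> @ashr_v2i64(<2 x i64> %x) {
      ; No SSE2 intrinsic exists for a per-element 64-bit arithmetic shift;
      ; the backend emulates this generic ashr before AVX512 and can select
      ; VPSRAQ once AVX512 is available.
      %r = ashr <2 x i64> %x, <i64 63, i64 63>
      ret <2 x i64> %r
    }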
diff --git a/llvm/test/CodeGen/X86/blend-of-shift.ll b/llvm/test/CodeGen/X86/blend-of-shift.ll
index cf382c7..b824e8b 100644
--- a/llvm/test/CodeGen/X86/blend-of-shift.ll
+++ b/llvm/test/CodeGen/X86/blend-of-shift.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2,X64,X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2,X64,X64-AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2,X86,X86-SSE2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2,X86,X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
 
 ;------------------------------ 32-bit shuffles -------------------------------;
 
@@ -178,77 +178,6 @@
   %i5 = shufflevector <4 x i32> %i3, <4 x i32> %i4, <4 x i32> <i32 7, i32 6, i32 1, i32 0>
   ret <4 x i32> %i5
 }
-define <4 x i32> @shuffle_i32_of_ashr_i64(<2 x i64> %x, <2 x i64> %y) nounwind {
-; X64-SSE2-LABEL: shuffle_i32_of_ashr_i64:
-; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    subq $40, %rsp
-; X64-SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; X64-SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    shufps $27, (%rsp), %xmm0 # 16-byte Folded Reload
-; X64-SSE2-NEXT:    # xmm0 = xmm0[3,2],mem[1,0]
-; X64-SSE2-NEXT:    addq $40, %rsp
-; X64-SSE2-NEXT:    retq
-;
-; X64-AVX2-LABEL: shuffle_i32_of_ashr_i64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    subq $40, %rsp
-; X64-AVX2-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vshufps $27, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX2-NEXT:    # xmm0 = xmm0[3,2],mem[1,0]
-; X64-AVX2-NEXT:    addq $40, %rsp
-; X64-AVX2-NEXT:    retq
-;
-; X86-SSE2-LABEL: shuffle_i32_of_ashr_i64:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    subl $32, %esp
-; X86-SSE2-NEXT:    movups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    movups %xmm0, (%esp) # 16-byte Spill
-; X86-SSE2-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    movups (%esp), %xmm1 # 16-byte Reload
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2],xmm1[1,0]
-; X86-SSE2-NEXT:    addl $32, %esp
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX2-LABEL: shuffle_i32_of_ashr_i64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    subl $32, %esp
-; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vmovups %xmm0, (%esp) # 16-byte Spill
-; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vshufps $27, (%esp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX2-NEXT:    # xmm0 = xmm0[3,2],mem[1,0]
-; X86-AVX2-NEXT:    addl $32, %esp
-; X86-AVX2-NEXT:    retl
-  %i1 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %x, i32 63)
-  %i2 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %y, i32 63)
-  %i3 = bitcast <2 x i64> %i1 to <4 x i32>
-  %i4 = bitcast <2 x i64> %i2 to <4 x i32>
-  %i5 = shufflevector <4 x i32> %i3, <4 x i32> %i4, <4 x i32> <i32 7, i32 6, i32 1, i32 0>
-  ret <4 x i32> %i5
-}
 
 ;------------------------------ 64-bit shuffles -------------------------------;
 
@@ -430,88 +359,3 @@
   %i5 = shufflevector <2 x i64> %i3, <2 x i64> %i4, <2 x i32> <i32 3, i32 0>
   ret <2 x i64> %i5
 }
-define <2 x i64> @shuffle_i64_of_ashr_i64(<2 x i64> %x, <2 x i64> %y) nounwind {
-; X64-SSE2-LABEL: shuffle_i64_of_ashr_i64:
-; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    subq $40, %rsp
-; X64-SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; X64-SSE2-NEXT:    movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    shufpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
-; X64-SSE2-NEXT:    # xmm0 = xmm0[1],mem[0]
-; X64-SSE2-NEXT:    addq $40, %rsp
-; X64-SSE2-NEXT:    retq
-;
-; X64-AVX2-LABEL: shuffle_i64_of_ashr_i64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    subq $40, %rsp
-; X64-AVX2-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX2-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; X64-AVX2-NEXT:    addq $40, %rsp
-; X64-AVX2-NEXT:    retq
-;
-; X86-SSE2-LABEL: shuffle_i64_of_ashr_i64:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    subl $32, %esp
-; X86-SSE2-NEXT:    movups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    movups %xmm0, (%esp) # 16-byte Spill
-; X86-SSE2-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    movups (%esp), %xmm1 # 16-byte Reload
-; X86-SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[0,1]
-; X86-SSE2-NEXT:    addl $32, %esp
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX2-LABEL: shuffle_i64_of_ashr_i64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    subl $32, %esp
-; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vmovups %xmm0, (%esp) # 16-byte Spill
-; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vmovdqu (%esp), %xmm1 # 16-byte Reload
-; X86-AVX2-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; X86-AVX2-NEXT:    addl $32, %esp
-; X86-AVX2-NEXT:    retl
-  %i1 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %x, i32 63)
-  %i2 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %y, i32 63)
-  %i3 = bitcast <2 x i64> %i1 to <2 x i64>
-  %i4 = bitcast <2 x i64> %i2 to <2 x i64>
-  %i5 = shufflevector <2 x i64> %i3, <2 x i64> %i4, <2 x i32> <i32 3, i32 0>
-  ret <2 x i64> %i5
-}
-
-declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
-declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32)
-declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)
-declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32)
-declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32)
-declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32)
-declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32)
-declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32)
-declare <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64>, i32) ; does not exist
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
-; X64: {{.*}}
-; X86: {{.*}}
diff --git a/llvm/test/CodeGen/X86/shuffle-of-shift.ll b/llvm/test/CodeGen/X86/shuffle-of-shift.ll
index e2dc74d..555c4cd 100644
--- a/llvm/test/CodeGen/X86/shuffle-of-shift.ll
+++ b/llvm/test/CodeGen/X86/shuffle-of-shift.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2,X64,X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2,X64,X64-AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2,X86,X86-SSE2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2,X86,X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
 
 ;------------------------------ 32-bit shuffles -------------------------------;
 
@@ -141,45 +141,6 @@
   %i3 = shufflevector <4 x i32> %i2, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x i32> %i3
 }
-define <4 x i32> @shuffle_i32_of_ashr_i64(<2 x i64> %x) nounwind {
-; X64-SSE2-LABEL: shuffle_i32_of_ashr_i64:
-; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pushq %rax
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; X64-SSE2-NEXT:    popq %rax
-; X64-SSE2-NEXT:    retq
-;
-; X64-AVX2-LABEL: shuffle_i32_of_ashr_i64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    pushq %rax
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; X64-AVX2-NEXT:    popq %rax
-; X64-AVX2-NEXT:    retq
-;
-; X86-SSE2-LABEL: shuffle_i32_of_ashr_i64:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX2-LABEL: shuffle_i32_of_ashr_i64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; X86-AVX2-NEXT:    retl
-  %i1 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %x, i32 63)
-  %i2 = bitcast <2 x i64> %i1 to <4 x i32>
-  %i3 = shufflevector <4 x i32> %i2, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  ret <4 x i32> %i3
-}
 
 ;------------------------------ 64-bit shuffles -------------------------------;
 
@@ -321,56 +282,3 @@
   %i3 = shufflevector <2 x i64> %i2, <2 x i64> poison, <2 x i32> <i32 1, i32 0>
   ret <2 x i64> %i3
 }
-define <2 x i64> @shuffle_i64_of_ashr_i64(<2 x i64> %x) nounwind {
-; X64-SSE2-LABEL: shuffle_i64_of_ashr_i64:
-; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pushq %rax
-; X64-SSE2-NEXT:    movl $63, %edi
-; X64-SSE2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X64-SSE2-NEXT:    popq %rax
-; X64-SSE2-NEXT:    retq
-;
-; X64-AVX2-LABEL: shuffle_i64_of_ashr_i64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    pushq %rax
-; X64-AVX2-NEXT:    movl $63, %edi
-; X64-AVX2-NEXT:    callq llvm.x86.sse2.psrai.q@PLT
-; X64-AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X64-AVX2-NEXT:    popq %rax
-; X64-AVX2-NEXT:    retq
-;
-; X86-SSE2-LABEL: shuffle_i64_of_ashr_i64:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pushl $63
-; X86-SSE2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-SSE2-NEXT:    addl $4, %esp
-; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX2-LABEL: shuffle_i64_of_ashr_i64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    pushl $63
-; X86-AVX2-NEXT:    calll llvm.x86.sse2.psrai.q@PLT
-; X86-AVX2-NEXT:    addl $4, %esp
-; X86-AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X86-AVX2-NEXT:    retl
-  %i1 = tail call <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64> %x, i32 63)
-  %i2 = bitcast <2 x i64> %i1 to <2 x i64>
-  %i3 = shufflevector <2 x i64> %i2, <2 x i64> poison, <2 x i32> <i32 1, i32 0>
-  ret <2 x i64> %i3
-}
-
-declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
-declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32)
-declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)
-declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32)
-declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32)
-declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32)
-declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32)
-declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32)
-declare <2 x i64> @llvm.x86.sse2.psrai.q(<2 x i64>, i32) ; does not exist
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
-; X64: {{.*}}
-; X86: {{.*}}