; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512dq,avx512vl | FileCheck %s --check-prefixes=AVX512DQ
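
; Vector llrint lowering: <N x float> and <N x double> elements are rounded
; with cvtss2si/cvtsd2si (folded into a single vcvtps2qq/vcvtpd2qq when
; AVX512DQ is available), while <N x fp128> elements become libcalls to
; llrintl. On i686, f32/f64 elements go through x87 fistpll stores.
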
define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) nounwind {
; X86-LABEL: llrint_v1i64_v1f32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: flds 8(%ebp)
; X86-NEXT: fistpll (%esp)
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; SSE-LABEL: llrint_v1i64_v1f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v1i64_v1f32:
; AVX: # %bb.0:
; AVX-NEXT: vcvtss2si %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v1i64_v1f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtss2si %xmm0, %rax
; AVX512DQ-NEXT: retq
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
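
; <2 x i64> result: i686 returns it indirectly via a pointer argument
; (retl $4) built from x87 fistpll stores; AVX512DQ+VL converts both lanes
; with a single vcvtps2qq.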
define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) nounwind {
; X86-LABEL: llrint_v2i64_v2f32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: flds 16(%ebp)
; X86-NEXT: flds 12(%ebp)
; X86-NEXT: fistpll (%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl (%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl %esi, 8(%eax)
; X86-NEXT: movl %edx, 4(%eax)
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -8(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v2i64_v2f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v2i64_v2f32:
; AVX: # %bb.0:
; AVX-NEXT: vcvtss2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vcvtss2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v2i64_v2f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtps2qq %xmm0, %xmm0
; AVX512DQ-NEXT: retq
%a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
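
; Four f32 lanes are scalarized through cvtss2si and reassembled with
; punpcklqdq/vinsert[fi]128; AVX512DQ converts xmm -> ymm in one vcvtps2qq.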
define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) nounwind {
; X86-LABEL: llrint_v4i64_v4f32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $56, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: flds 24(%ebp)
; X86-NEXT: flds 20(%ebp)
; X86-NEXT: flds 16(%ebp)
; X86-NEXT: flds 12(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, 28(%eax)
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl %edx, 20(%eax)
; X86-NEXT: movl %ebx, 16(%eax)
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v4i64_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: cvtss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; SSE-NEXT: cvtss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v4i64_v4f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v4i64_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vcvtss2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v4i64_v4f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtps2qq %xmm0, %ymm0
; AVX512DQ-NEXT: retq
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
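
; Eight f32 lanes: the AVX paths split the ymm source into 128-bit halves
; with vextractf128; AVX512DQ converts ymm -> zmm in one vcvtps2qq.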
define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) nounwind {
; X86-LABEL: llrint_v8i64_v8f32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $120, %esp
; X86-NEXT: flds 12(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 16(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 20(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 24(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 28(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 32(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 36(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 40(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, 60(%eax)
; X86-NEXT: movl %ecx, 56(%eax)
; X86-NEXT: movl %edx, 52(%eax)
; X86-NEXT: movl %esi, 48(%eax)
; X86-NEXT: movl %edi, 44(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 40(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 36(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 32(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 28(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 20(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 16(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v8i64_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[1,1]
; SSE-NEXT: cvtss2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3]
; SSE-NEXT: cvtss2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: cvtss2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE-NEXT: cvtss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
; SSE-NEXT: cvtss2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm1[3,3]
; SSE-NEXT: cvtss2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v8i64_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v8i64_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v8i64_v8f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: retq
%a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
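
; Sixteen f32 lanes: X86 and SSE return the <16 x i64> through a pointer
; (%eax / %rdi); AVX512DQ needs two vcvtps2qq, one per 256-bit half.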
define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) nounwind {
; X86-LABEL: llrint_v16i64_v16f32:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $248, %esp
; X86-NEXT: flds 12(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 16(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 20(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 24(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 28(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 32(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 36(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 40(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 44(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 48(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 52(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 56(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 60(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 64(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 68(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: flds 72(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, 124(%eax)
; X86-NEXT: movl %ecx, 120(%eax)
; X86-NEXT: movl %edx, 116(%eax)
; X86-NEXT: movl %esi, 112(%eax)
; X86-NEXT: movl %edi, 108(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 104(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 100(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 96(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 92(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 88(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 84(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 80(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 76(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 72(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 68(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 64(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 60(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 56(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 52(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 48(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 44(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 40(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 36(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 32(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 28(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 20(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 16(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v16i64_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: cvtss2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %xmm4
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
; SSE-NEXT: cvtss2si %xmm5, %rcx
; SSE-NEXT: movq %rcx, %xmm5
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm0[3,3]
; SSE-NEXT: cvtss2si %xmm5, %rcx
; SSE-NEXT: movq %rcx, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtss2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; SSE-NEXT: cvtss2si %xmm1, %rcx
; SSE-NEXT: movq %rcx, %xmm5
; SSE-NEXT: movaps %xmm1, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[1,1]
; SSE-NEXT: cvtss2si %xmm6, %rcx
; SSE-NEXT: movq %rcx, %xmm6
; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; SSE-NEXT: movaps %xmm1, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm1[3,3]
; SSE-NEXT: cvtss2si %xmm6, %rcx
; SSE-NEXT: movq %rcx, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtss2si %xmm1, %rcx
; SSE-NEXT: movq %rcx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm6[0]
; SSE-NEXT: cvtss2si %xmm2, %rcx
; SSE-NEXT: movq %rcx, %xmm6
; SSE-NEXT: movaps %xmm2, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
; SSE-NEXT: cvtss2si %xmm7, %rcx
; SSE-NEXT: movq %rcx, %xmm7
; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; SSE-NEXT: movaps %xmm2, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm2[3,3]
; SSE-NEXT: cvtss2si %xmm7, %rcx
; SSE-NEXT: movq %rcx, %xmm7
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: cvtss2si %xmm2, %rcx
; SSE-NEXT: movq %rcx, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; SSE-NEXT: cvtss2si %xmm3, %rcx
; SSE-NEXT: movq %rcx, %xmm7
; SSE-NEXT: movaps %xmm3, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm3[1,1]
; SSE-NEXT: cvtss2si %xmm8, %rcx
; SSE-NEXT: movq %rcx, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm8[0]
; SSE-NEXT: movaps %xmm3, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,3],xmm3[3,3]
; SSE-NEXT: cvtss2si %xmm8, %rcx
; SSE-NEXT: movq %rcx, %xmm8
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvtss2si %xmm3, %rcx
; SSE-NEXT: movq %rcx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm8[0]
; SSE-NEXT: movdqa %xmm3, 112(%rdi)
; SSE-NEXT: movdqa %xmm7, 96(%rdi)
; SSE-NEXT: movdqa %xmm2, 80(%rdi)
; SSE-NEXT: movdqa %xmm6, 64(%rdi)
; SSE-NEXT: movdqa %xmm1, 48(%rdi)
; SSE-NEXT: movdqa %xmm5, 32(%rdi)
; SSE-NEXT: movdqa %xmm0, 16(%rdi)
; SSE-NEXT: movdqa %xmm4, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v16i64_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps %ymm0, %ymm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm2[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vshufpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vmovshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm4, %rax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm4 = xmm2[1,0]
; AVX1-NEXT: vcvtss2si %xmm4, %rax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm4
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vshufpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm5, %rax
; AVX1-NEXT: vmovq %rax, %xmm5
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[3,3,3,3]
; AVX1-NEXT: vcvtss2si %xmm3, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX1-NEXT: vcvtss2si %xmm5, %rax
; AVX1-NEXT: vmovq %rax, %xmm5
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm5[0],xmm3[0]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm5
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm3
; AVX1-NEXT: vmovaps %ymm4, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v16i64_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm4, %rax
; AVX512-NEXT: vmovq %rax, %xmm4
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm1[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX512-NEXT: vcvtss2si %xmm4, %rax
; AVX512-NEXT: vmovq %rax, %xmm4
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm4
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX512-NEXT: vcvtss2si %xmm3, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX512-NEXT: vcvtss2si %xmm4, %rax
; AVX512-NEXT: vmovq %rax, %xmm4
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm4
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtss2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v16i64_v16f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtps2qq %ymm0, %zmm2
; AVX512DQ-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT: vcvtps2qq %ymm0, %zmm1
; AVX512DQ-NEXT: vmovaps %zmm2, %zmm0
; AVX512DQ-NEXT: retq
%a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
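
; f64 sources use cvtsd2si instead of cvtss2si (fldl/fistpll on i686).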
define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
; X86-LABEL: llrint_v1i64_v1f64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: fldl 8(%ebp)
; X86-NEXT: fistpll (%esp)
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; SSE-LABEL: llrint_v1i64_v1f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v1i64_v1f64:
; AVX: # %bb.0:
; AVX-NEXT: vcvtsd2si %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v1i64_v1f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtsd2si %xmm0, %rax
; AVX512DQ-NEXT: retq
%a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
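
; Both f64 lanes through cvtsd2si; a single vcvtpd2qq with AVX512DQ+VL.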
define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) nounwind {
; X86-LABEL: llrint_v2i64_v2f64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldl 20(%ebp)
; X86-NEXT: fldl 12(%ebp)
; X86-NEXT: fistpll (%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl (%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl %esi, 8(%eax)
; X86-NEXT: movl %edx, 4(%eax)
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -8(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v2i64_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v2i64_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vcvtsd2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vcvtsd2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v2i64_v2f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtpd2qq %xmm0, %xmm0
; AVX512DQ-NEXT: retq
%a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
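
; Four f64 lanes; AVX512DQ converts the whole ymm with one vcvtpd2qq.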
define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) nounwind {
; X86-LABEL: llrint_v4i64_v4f64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $56, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldl 36(%ebp)
; X86-NEXT: fldl 28(%ebp)
; X86-NEXT: fldl 20(%ebp)
; X86-NEXT: fldl 12(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, 28(%eax)
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl %edx, 20(%eax)
; X86-NEXT: movl %ebx, 16(%eax)
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v4i64_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: cvtsd2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtsd2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v4i64_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vcvtsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-NEXT: vcvtsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vcvtsd2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vcvtsd2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v4i64_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vcvtsd2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vcvtsd2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vcvtsd2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vcvtsd2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v4i64_v4f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtpd2qq %ymm0, %ymm0
; AVX512DQ-NEXT: retq
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
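
; Eight f64 lanes: plain AVX512 scalarizes via vextractf32x4 + vcvtsd2si;
; AVX512DQ lowers the whole zmm with one vcvtpd2qq.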
define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) nounwind {
; X86-LABEL: llrint_v8i64_v8f64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $120, %esp
; X86-NEXT: fldl 12(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 20(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 28(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 36(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 44(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 52(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 60(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: fldl 68(%ebp)
; X86-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, 60(%eax)
; X86-NEXT: movl %ecx, 56(%eax)
; X86-NEXT: movl %edx, 52(%eax)
; X86-NEXT: movl %esi, 48(%eax)
; X86-NEXT: movl %edi, 44(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 40(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 36(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 32(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 28(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 20(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 16(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v8i64_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT: cvtsd2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm5
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtsd2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; SSE-NEXT: cvtsd2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm6
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: cvtsd2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; SSE-NEXT: cvtsd2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm7
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvtsd2si %xmm3, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v8i64_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vcvtsd2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX1-NEXT: vcvtsd2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX1-NEXT: vcvtsd2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vcvtsd2si %xmm0, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vcvtsd2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX1-NEXT: vcvtsd2si %xmm2, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX1-NEXT: vcvtsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-NEXT: vcvtsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v8i64_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; AVX512-NEXT: vcvtsd2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vcvtsd2si %xmm1, %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vcvtsd2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vcvtsd2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vcvtsd2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vcvtsd2si %xmm2, %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vcvtsd2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vcvtsd2si %xmm0, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v8i64_v8f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: retq
%a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
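
; fp128 has no hardware conversion, so every element is a libcall to llrintl.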
define <1 x i64> @llrint_v1i64_v1f128(<1 x fp128> %x) nounwind {
; X86-LABEL: llrint_v1i64_v1f128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: pushl 20(%ebp)
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl 12(%ebp)
; X86-NEXT: pushl 8(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; SSE-LABEL: llrint_v1i64_v1f128:
; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v1i64_v1f128:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: callq llrintl@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v1i64_v1f128:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: pushq %rax
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: popq %rcx
; AVX512DQ-NEXT: retq
%a = call <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128>)
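
; Two fp128 libcalls; the i64 results are packed with punpcklqdq.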
define <2 x i64> @llrint_v2i64_v2f128(<2 x fp128> %x) nounwind {
; X86-LABEL: llrint_v2i64_v2f128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %esi
; X86-NEXT: pushl 24(%ebp)
; X86-NEXT: pushl 20(%ebp)
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl 12(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: pushl 40(%ebp)
; X86-NEXT: pushl 36(%ebp)
; X86-NEXT: pushl 32(%ebp)
; X86-NEXT: pushl 28(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, 8(%esi)
; X86-NEXT: movl %ebx, 4(%esi)
; X86-NEXT: movl %edi, (%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v2i64_v2f128:
; SSE: # %bb.0:
; SSE-NEXT: subq $40, %rsp
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: llrint_v2i64_v2f128:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: callq llrintl@PLT
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq llrintl@PLT
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v2i64_v2f128:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: subq $40, %rsp
; AVX512DQ-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm1, %xmm0
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: addq $40, %rsp
; AVX512DQ-NEXT: retq
%a = call <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128>)
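
; Four fp128 elements, each converted by a separate llrintl call, with the
; remaining vector arguments spilled around each call.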
define <4 x i64> @llrint_v4i64_v4f128(<4 x fp128> %x) nounwind {
; X86-LABEL: llrint_v4i64_v4f128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $32, %esp
; X86-NEXT: movl 8(%ebp), %esi
; X86-NEXT: movl 36(%ebp), %edi
; X86-NEXT: movl 40(%ebp), %ebx
; X86-NEXT: pushl 24(%ebp)
; X86-NEXT: pushl 20(%ebp)
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl 12(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl 32(%ebp)
; X86-NEXT: pushl 28(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 56(%ebp)
; X86-NEXT: pushl 52(%ebp)
; X86-NEXT: pushl 48(%ebp)
; X86-NEXT: pushl 44(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: pushl 72(%ebp)
; X86-NEXT: pushl 68(%ebp)
; X86-NEXT: pushl 64(%ebp)
; X86-NEXT: pushl 60(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %edx, 28(%esi)
; X86-NEXT: movl %eax, 24(%esi)
; X86-NEXT: movl %ebx, 20(%esi)
; X86-NEXT: movl %edi, 16(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 12(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 8(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 4(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v4i64_v4f128:
; SSE: # %bb.0:
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[0],mem[0]
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v4i64_v4f128:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $72, %rsp
; AVX1-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm3, %xmm0
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: addq $72, %rsp
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v4i64_v4f128:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $72, %rsp
; AVX512-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512-NEXT: addq $72, %rsp
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v4i64_v4f128:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: subq $72, %rsp
; AVX512DQ-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm3, %xmm0
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: addq $72, %rsp
; AVX512DQ-NEXT: retq
%a = call <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128>)
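
; Eight fp128 libcalls; the incoming xmm arguments are spilled to the stack
; up front and converted one call at a time.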
define <8 x i64> @llrint_v8i64_v8f128(<8 x fp128> %x) nounwind {
; X86-LABEL: llrint_v8i64_v8f128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $64, %esp
; X86-NEXT: movl 8(%ebp), %esi
; X86-NEXT: movl 36(%ebp), %edi
; X86-NEXT: movl 40(%ebp), %ebx
; X86-NEXT: pushl 24(%ebp)
; X86-NEXT: pushl 20(%ebp)
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl 12(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl 32(%ebp)
; X86-NEXT: pushl 28(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 56(%ebp)
; X86-NEXT: pushl 52(%ebp)
; X86-NEXT: pushl 48(%ebp)
; X86-NEXT: pushl 44(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 72(%ebp)
; X86-NEXT: pushl 68(%ebp)
; X86-NEXT: pushl 64(%ebp)
; X86-NEXT: pushl 60(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 88(%ebp)
; X86-NEXT: pushl 84(%ebp)
; X86-NEXT: pushl 80(%ebp)
; X86-NEXT: pushl 76(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 104(%ebp)
; X86-NEXT: pushl 100(%ebp)
; X86-NEXT: pushl 96(%ebp)
; X86-NEXT: pushl 92(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl 120(%ebp)
; X86-NEXT: pushl 116(%ebp)
; X86-NEXT: pushl 112(%ebp)
; X86-NEXT: pushl 108(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: pushl 136(%ebp)
; X86-NEXT: pushl 132(%ebp)
; X86-NEXT: pushl 128(%ebp)
; X86-NEXT: pushl 124(%ebp)
; X86-NEXT: calll llrintl
; X86-NEXT: addl $16, %esp
; X86-NEXT: movl %edx, 60(%esi)
; X86-NEXT: movl %eax, 56(%esi)
; X86-NEXT: movl %ebx, 52(%esi)
; X86-NEXT: movl %edi, 48(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 44(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 40(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 36(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 32(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 28(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 24(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 20(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 16(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 12(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 8(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 4(%esi)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; SSE-LABEL: llrint_v8i64_v8f128:
; SSE: # %bb.0:
; SSE-NEXT: subq $136, %rsp
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq llrintl@PLT
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = xmm3[0],mem[0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: addq $136, %rsp
; SSE-NEXT: retq
;
; AVX1-LABEL: llrint_v8i64_v8f128:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $152, %rsp
; AVX1-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm3, %xmm0
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq llrintl@PLT
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: addq $152, %rsp
; AVX1-NEXT: retq
;
; AVX512-LABEL: llrint_v8i64_v8f128:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $152, %rsp
; AVX512-NEXT: vmovaps %xmm6, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps %xmm7, %xmm0
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq llrintl@PLT
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; AVX512-NEXT: addq $152, %rsp
; AVX512-NEXT: retq
;
; AVX512DQ-LABEL: llrint_v8i64_v8f128:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: subq $152, %rsp
; AVX512DQ-NEXT: vmovaps %xmm6, (%rsp) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps %xmm7, %xmm0
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512DQ-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-NEXT: callq llrintl@PLT
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512DQ-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
; AVX512DQ-NEXT: addq $152, %rsp
; AVX512DQ-NEXT: retq
%a = call <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128>)
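; Note: even with AVX512DQ enabled there is no direct fp128-to-i64 conversion,
; so the fp128 cases above scalarize to llrintl libcalls under every run
; configuration; the wider-vector targets differ only in how the lane results
; are reassembled (vpunpcklqdq into %xmm, vinserti128 into %ymm, and
; vinserti64x4 into %zmm for the 8-element case).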