; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=small | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=medium | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=large | FileCheck %s --check-prefix=X64-LARGE

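; The function below chains MMX padd.d/padd.w/pmulu.dq intrinsics over four
; double arguments reinterpreted as <1 x i64>, with two operands written as
; constant-expression bitcasts of double 0.0. The chain keeps enough values
; live to force a spill. The generated checks show the zero operands being
; materialized with pxor instead of being loaded from memory, and the
; spilled value being folded directly into pmuludq as a memory operand.
; The small and medium code models share the X64 prefix; the large code
; model is checked separately under X64-LARGE.
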
define double @mmx_zero(double, double, double, double) nounwind {
; X86-LABEL: mmx_zero:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movq 8(%ebp), %mm0
; X86-NEXT:    movq 16(%ebp), %mm5
; X86-NEXT:    movq %mm5, (%esp) # 8-byte Spill
; X86-NEXT:    movq %mm0, %mm3
; X86-NEXT:    paddd %mm5, %mm3
; X86-NEXT:    pxor %mm1, %mm1
; X86-NEXT:    movq %mm3, %mm6
; X86-NEXT:    pmuludq %mm1, %mm6
; X86-NEXT:    movq 24(%ebp), %mm4
; X86-NEXT:    movq %mm6, %mm2
; X86-NEXT:    paddd %mm4, %mm2
; X86-NEXT:    paddw %mm2, %mm0
; X86-NEXT:    movq %mm5, %mm1
; X86-NEXT:    paddw %mm0, %mm1
; X86-NEXT:    movq 32(%ebp), %mm5
; X86-NEXT:    movq %mm1, %mm7
; X86-NEXT:    pmuludq %mm5, %mm7
; X86-NEXT:    paddw %mm4, %mm7
; X86-NEXT:    paddw %mm7, %mm5
; X86-NEXT:    paddw %mm5, %mm2
; X86-NEXT:    paddw %mm2, %mm0
; X86-NEXT:    paddw %mm6, %mm0
; X86-NEXT:    pmuludq %mm3, %mm0
; X86-NEXT:    pxor %mm3, %mm3
; X86-NEXT:    paddw %mm3, %mm0
; X86-NEXT:    paddw %mm1, %mm0
; X86-NEXT:    pmuludq %mm7, %mm0
; X86-NEXT:    pmuludq (%esp), %mm0 # 8-byte Folded Reload
; X86-NEXT:    paddw %mm5, %mm0
; X86-NEXT:    paddw %mm2, %mm0
; X86-NEXT:    movq2dq %mm0, %xmm0
; X86-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT:    fldl {{[0-9]+}}(%esp)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: mmx_zero:
; X64:       # %bb.0:
; X64-NEXT:    movdq2q %xmm0, %mm0
; X64-NEXT:    movdq2q %xmm1, %mm5
; X64-NEXT:    movq %mm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq %mm0, %mm3
; X64-NEXT:    paddd %mm5, %mm3
; X64-NEXT:    pxor %mm1, %mm1
; X64-NEXT:    movq %mm3, %mm6
; X64-NEXT:    pmuludq %mm1, %mm6
; X64-NEXT:    movdq2q %xmm2, %mm4
; X64-NEXT:    movq %mm6, %mm2
; X64-NEXT:    paddd %mm4, %mm2
; X64-NEXT:    paddw %mm2, %mm0
; X64-NEXT:    movq %mm5, %mm1
; X64-NEXT:    paddw %mm0, %mm1
; X64-NEXT:    movdq2q %xmm3, %mm5
; X64-NEXT:    movq %mm1, %mm7
; X64-NEXT:    pmuludq %mm5, %mm7
; X64-NEXT:    paddw %mm4, %mm7
; X64-NEXT:    paddw %mm7, %mm5
; X64-NEXT:    paddw %mm5, %mm2
; X64-NEXT:    paddw %mm2, %mm0
; X64-NEXT:    paddw %mm6, %mm0
; X64-NEXT:    pmuludq %mm3, %mm0
; X64-NEXT:    pxor %mm3, %mm3
; X64-NEXT:    paddw %mm3, %mm0
; X64-NEXT:    paddw %mm1, %mm0
; X64-NEXT:    pmuludq %mm7, %mm0
; X64-NEXT:    pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
; X64-NEXT:    paddw %mm5, %mm0
; X64-NEXT:    paddw %mm2, %mm0
; X64-NEXT:    movq2dq %mm0, %xmm0
; X64-NEXT:    retq
;
; X64-LARGE-LABEL: mmx_zero:
; X64-LARGE:       # %bb.0:
; X64-LARGE-NEXT:    movdq2q %xmm0, %mm0
; X64-LARGE-NEXT:    movdq2q %xmm1, %mm5
; X64-LARGE-NEXT:    movq %mm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-LARGE-NEXT:    movq %mm0, %mm3
; X64-LARGE-NEXT:    paddd %mm5, %mm3
; X64-LARGE-NEXT:    pxor %mm1, %mm1
; X64-LARGE-NEXT:    movq %mm3, %mm6
; X64-LARGE-NEXT:    pmuludq %mm1, %mm6
; X64-LARGE-NEXT:    movdq2q %xmm2, %mm4
; X64-LARGE-NEXT:    movq %mm6, %mm2
; X64-LARGE-NEXT:    paddd %mm4, %mm2
; X64-LARGE-NEXT:    paddw %mm2, %mm0
; X64-LARGE-NEXT:    movq %mm5, %mm1
; X64-LARGE-NEXT:    paddw %mm0, %mm1
; X64-LARGE-NEXT:    movdq2q %xmm3, %mm5
; X64-LARGE-NEXT:    movq %mm1, %mm7
; X64-LARGE-NEXT:    pmuludq %mm5, %mm7
; X64-LARGE-NEXT:    paddw %mm4, %mm7
; X64-LARGE-NEXT:    paddw %mm7, %mm5
; X64-LARGE-NEXT:    paddw %mm5, %mm2
; X64-LARGE-NEXT:    paddw %mm2, %mm0
; X64-LARGE-NEXT:    paddw %mm6, %mm0
; X64-LARGE-NEXT:    pmuludq %mm3, %mm0
; X64-LARGE-NEXT:    pxor %mm3, %mm3
; X64-LARGE-NEXT:    paddw %mm3, %mm0
; X64-LARGE-NEXT:    paddw %mm1, %mm0
; X64-LARGE-NEXT:    pmuludq %mm7, %mm0
; X64-LARGE-NEXT:    pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
; X64-LARGE-NEXT:    paddw %mm5, %mm0
; X64-LARGE-NEXT:    paddw %mm2, %mm0
; X64-LARGE-NEXT:    movq2dq %mm0, %xmm0
; X64-LARGE-NEXT:    retq
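  ; %8 and %21 below consume the all-zeros constant; these are the operands
  ; the backend materializes with pxor in the checks above.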
  %5 = bitcast double %0 to <1 x i64>
  %6 = bitcast double %1 to <1 x i64>
  %7 = tail call <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64> %5, <1 x i64> %6)
  %8 = tail call <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64> %7, <1 x i64> bitcast (double 0.000000e+00 to <1 x i64>))
  %9 = bitcast double %2 to <1 x i64>
  %10 = tail call <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64> %8, <1 x i64> %9)
  %11 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %5, <1 x i64> %10)
  %12 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %6, <1 x i64> %11)
  %13 = bitcast double %3 to <1 x i64>
  %14 = tail call <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64> %12, <1 x i64> %13)
  %15 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %14, <1 x i64> %9)
  %16 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %15, <1 x i64> %13)
  %17 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %16, <1 x i64> %10)
  %18 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %17, <1 x i64> %11)
  %19 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %18, <1 x i64> %8)
  %20 = tail call <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64> %19, <1 x i64> %7)
  %21 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %20, <1 x i64> bitcast (double 0.000000e+00 to <1 x i64>))
  %22 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %21, <1 x i64> %12)
  %23 = tail call <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64> %22, <1 x i64> %15)
  %24 = tail call <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64> %23, <1 x i64> %6)
  %25 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %24, <1 x i64> %16)
  %26 = tail call <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64> %25, <1 x i64> %17)
  %27 = bitcast <1 x i64> %26 to double
  ret double %27
}

declare <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.padd.w(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.x86.mmx.pmulu.dq(<1 x i64>, <1 x i64>)
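; Note: the intrinsics take and return <1 x i64>, the type used for MMX
; values in LLVM IR (recent versions dropped the dedicated x86_mmx type).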