; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3,SSSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSSE3,SSSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2

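; pr26491: a full horizontal reduction of the four floats in %a0. The shuffle
; duplicates the odd lanes, the vector fadd then leaves a0[0]+a0[1] in lane 0
; and a0[2]+a0[3] in lane 2, and the final scalar fadd combines the two
; partial sums - the pairwise-add pattern that haddps implements. The
; fast-hops RUN lines enable the tuning that makes forming hadd profitable.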
define float @pr26491(<4 x float> %a0) {
; SSE2-LABEL: pr26491:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[3,3]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: pr26491:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSSE3-NEXT: addps %xmm0, %xmm1
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSSE3-NEXT: addss %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: pr26491:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  %2 = fadd <4 x float> %1, %a0
  %3 = extractelement <4 x float> %2, i32 2
  %4 = extractelement <4 x float> %2, i32 0
  %5 = fadd float %3, %4
  ret float %5
}

; When simplifying away a splat (broadcast), the hop type must match the shuffle type.
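; PR41414 below exercises this: the uitofp i64 -> double conversion lowers to
; a subpd against magic constants followed by a pairwise add of the two
; halves, and that sum is splatted to all four lanes before the divide. In the
; fast-hops runs the add+splat is folded into haddpd; the other runs keep the
; unpckhpd/addpd sequence plus an explicit splat.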

define <4 x double> @PR41414(i64 %x, <4 x double> %y) {
; SSE2-LABEL: PR41414:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSE2-NEXT: subpd {{.*}}(%rip), %xmm2
; SSE2-NEXT: movapd %xmm2, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE2-NEXT: addpd %xmm2, %xmm3
; SSE2-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0,0]
; SSE2-NEXT: divpd %xmm3, %xmm1
; SSE2-NEXT: divpd %xmm3, %xmm0
; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: addpd %xmm2, %xmm0
; SSE2-NEXT: addpd %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-SLOW-LABEL: PR41414:
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: movq %rdi, %xmm2
; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSSE3-SLOW-NEXT: subpd {{.*}}(%rip), %xmm2
; SSSE3-SLOW-NEXT: movapd %xmm2, %xmm3
; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSSE3-SLOW-NEXT: addpd %xmm2, %xmm3
; SSSE3-SLOW-NEXT: movddup {{.*#+}} xmm2 = xmm3[0,0]
; SSSE3-SLOW-NEXT: divpd %xmm2, %xmm1
; SSSE3-SLOW-NEXT: divpd %xmm2, %xmm0
; SSSE3-SLOW-NEXT: xorpd %xmm2, %xmm2
; SSSE3-SLOW-NEXT: addpd %xmm2, %xmm0
; SSSE3-SLOW-NEXT: addpd %xmm2, %xmm1
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: PR41414:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: movq %rdi, %xmm2
; SSSE3-FAST-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSSE3-FAST-NEXT: subpd {{.*}}(%rip), %xmm2
; SSSE3-FAST-NEXT: haddpd %xmm2, %xmm2
; SSSE3-FAST-NEXT: divpd %xmm2, %xmm1
; SSSE3-FAST-NEXT: divpd %xmm2, %xmm0
; SSSE3-FAST-NEXT: xorpd %xmm2, %xmm2
; SSSE3-FAST-NEXT: addpd %xmm2, %xmm0
; SSSE3-FAST-NEXT: addpd %xmm2, %xmm1
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-LABEL: PR41414:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovq %rdi, %xmm1
; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; AVX1-SLOW-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: PR41414:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vmovq %rdi, %xmm1
; AVX1-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; AVX1-FAST-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-FAST-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: PR41414:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; AVX2-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
  %conv = uitofp i64 %x to double
  %t0 = insertelement <4 x double> undef, double %conv, i32 0
  %t1 = shufflevector <4 x double> %t0, <4 x double> undef, <4 x i32> zeroinitializer
  %t2 = fdiv <4 x double> %y, %t1
  %t3 = fadd <4 x double> zeroinitializer, %t2
  ret <4 x double> %t3
}