; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
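
; Each test sign-extends a narrow vector and feeds it to
; @llvm.vector.reduce.add. The checks expect the extension to lower to
; pmovsx*/psra*-style widening and the sum to be formed by a log2(N)
; shuffle-and-add tree (vphadd* steps when +fast-hops is set), with no
; scalarized adds. E.g. for <4 x i64>: [a,b,c,d] -> [a+c,b+d] -> (a+c)+(b+d).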

;
; vXi64
;

define i64 @test_v2i64_v2i32(<2 x i32> %a0) {
; SSE2-LABEL: test_v2i64_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movq %xmm1, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i64_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxdq %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2i64_v2i32:
; AVX: # %bb.0:
; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    retq
  %1 = sext <2 x i32> %a0 to <2 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %1)
  ret i64 %2
}

define i64 @test_v4i64_v4i16(<4 x i16> %a0) {
; SSE2-LABEL: test_v4i64_v4i16:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    paddq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movq %xmm1, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i64_v4i16:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxwq %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovsxwq %xmm0, %xmm0
; SSE41-NEXT:    paddq %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v4i64_v4i16:
; AVX1: # %bb.0:
; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i64_v4i16:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i64_v4i16:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxwq %xmm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <4 x i16> %a0 to <4 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %1)
  ret i64 %2
}

define i64 @test_v8i64_v8i8(<8 x i8> %a0) {
; SSE2-LABEL: test_v8i64_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSE2-NEXT:    paddq %xmm4, %xmm5
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    paddq %xmm1, %xmm0
; SSE2-NEXT:    paddq %xmm5, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movq %xmm1, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8i64_v8i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovsxbq %xmm2, %xmm2
; SSE41-NEXT:    paddq %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrlq $48, %xmm1
; SSE41-NEXT:    pmovsxbq %xmm1, %xmm1
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm0
; SSE41-NEXT:    paddq %xmm1, %xmm0
; SSE41-NEXT:    paddq %xmm2, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v8i64_v8i8:
; AVX1: # %bb.0:
; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT:    vpmovsxbq %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; AVX1-NEXT:    vpsrld $16, %xmm2, %xmm3
; AVX1-NEXT:    vpmovsxbq %xmm3, %xmm3
; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpmovsxbq %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxbq %xmm2, %xmm2
; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v8i64_v8i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbq %xmm0, %ymm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpmovsxbq %xmm0, %ymm0
; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v8i64_v8i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbq %xmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <8 x i8> %a0 to <8 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %1)
  ret i64 %2
}

define i64 @test_v16i64_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i64_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT:    psrad $24, %xmm2
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    pxor %xmm7, %xmm7
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm7
; SSE2-NEXT:    movdqa %xmm0, %xmm8
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; SSE2-NEXT:    paddq %xmm5, %xmm8
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $24, %xmm4
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm4, %xmm9
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $24, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm6, %xmm1
; SSE2-NEXT:    movdqa %xmm6, %xmm10
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
; SSE2-NEXT:    paddq %xmm9, %xmm10
; SSE2-NEXT:    paddq %xmm8, %xmm10
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; SSE2-NEXT:    paddq %xmm2, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; SSE2-NEXT:    paddq %xmm4, %xmm6
; SSE2-NEXT:    paddq %xmm0, %xmm6
; SSE2-NEXT:    paddq %xmm10, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
; SSE2-NEXT:    paddq %xmm6, %xmm0
; SSE2-NEXT:    movq %xmm0, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i64_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE41-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm0
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    pmovsxbq %xmm1, %xmm1
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT:    pmovsxbq %xmm2, %xmm0
; SSE41-NEXT:    psrlq $48, %xmm3
; SSE41-NEXT:    pmovsxbq %xmm3, %xmm2
; SSE41-NEXT:    paddq %xmm0, %xmm2
; SSE41-NEXT:    paddq %xmm1, %xmm2
; SSE41-NEXT:    pmovsxbq %xmm5, %xmm0
; SSE41-NEXT:    paddq %xmm4, %xmm0
; SSE41-NEXT:    pmovsxbq %xmm6, %xmm1
; SSE41-NEXT:    pmovsxbq %xmm7, %xmm3
; SSE41-NEXT:    paddq %xmm1, %xmm3
; SSE41-NEXT:    paddq %xmm0, %xmm3
; SSE41-NEXT:    paddq %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
; SSE41-NEXT:    paddq %xmm3, %xmm0
; SSE41-NEXT:    movq %xmm0, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v16i64_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT:    vpmovsxbq %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpmovsxbw %xmm2, %xmm3
; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
; AVX1-NEXT:    vpmovsxwq %xmm4, %xmm4
; AVX1-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vpmovsxbw %xmm0, %xmm4
; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[3,3,3,3]
; AVX1-NEXT:    vpmovsxwq %xmm5, %xmm5
; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[3,3,3,3]
; AVX1-NEXT:    vpmovsxwq %xmm6, %xmm6
; AVX1-NEXT:    vpaddq %xmm6, %xmm5, %xmm5
; AVX1-NEXT:    vpaddq %xmm5, %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
; AVX1-NEXT:    vpmovsxwq %xmm4, %xmm4
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpmovsxbq %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxbq %xmm2, %xmm2
; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v16i64_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-NEXT:    vpmovsxwq %xmm1, %ymm1
; AVX2-NEXT:    vpaddq %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpmovsxbq %xmm0, %ymm0
; AVX2-NEXT:    vpmovsxwq %xmm2, %ymm2
; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v16i64_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpmovsxwq %xmm1, %zmm1
; AVX512-NEXT:    vpmovsxwq %xmm0, %zmm0
; AVX512-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <16 x i8> %a0 to <16 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %1)
  ret i64 %2
}

;
; vXi32
;

define i32 @test_v2i32_v2i16(<2 x i16> %a0) {
; SSE2-LABEL: test_v2i32_v2i16:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i32_v2i16:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT:    paddd %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v2i32_v2i16:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v2i32_v2i16:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v2i32_v2i16:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v2i32_v2i16:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    retq
  %1 = sext <2 x i16> %a0 to <2 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %1)
  ret i32 %2
}

define i32 @test_v4i32(<4 x i8> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v4i32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v4i32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    retq
  %1 = sext <4 x i8> %a0 to <4 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  ret i32 %2
}

define i32 @test_v8i32_v8i8(<8 x i8> %a0) {
; SSE2-LABEL: test_v8i32_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8i32_v8i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v8i32_v8i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v8i32_v8i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v8i32_v8i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v8i32_v8i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <8 x i8> %a0 to <8 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
  ret i32 %2
}

define i32 @test_v16i32_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i32_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    psrad $24, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT:    psrad $24, %xmm3
; SSE2-NEXT:    paddd %xmm2, %xmm3
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    paddd %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i32_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE41-NEXT:    pmovsxbd %xmm2, %xmm2
; SSE41-NEXT:    paddd %xmm1, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
; SSE41-NEXT:    pmovsxbd %xmm1, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    paddd %xmm2, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v16i32_v16i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v16i32_v16i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v16i32_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v16i32_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <16 x i8> %a0 to <16 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
  ret i32 %2
}

define i32 @test_v32i32_v32i8(<32 x i8> %a0) {
; SSE2-LABEL: test_v32i32_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSE2-NEXT:    psrad $24, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; SSE2-NEXT:    psrad $24, %xmm5
; SSE2-NEXT:    paddd %xmm3, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT:    psrad $24, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; SSE2-NEXT:    psrad $24, %xmm6
; SSE2-NEXT:    paddd %xmm3, %xmm6
; SSE2-NEXT:    paddd %xmm5, %xmm6
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; SSE2-NEXT:    psrad $24, %xmm3
; SSE2-NEXT:    paddd %xmm2, %xmm3
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    paddd %xmm3, %xmm0
; SSE2-NEXT:    paddd %xmm6, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v32i32_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE41-NEXT:    pmovsxbd %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovsxbd %xmm3, %xmm3
; SSE41-NEXT:    paddd %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE41-NEXT:    pmovsxbd %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; SSE41-NEXT:    pmovsxbd %xmm4, %xmm4
; SSE41-NEXT:    paddd %xmm2, %xmm4
; SSE41-NEXT:    paddd %xmm3, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE41-NEXT:    pmovsxbd %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE41-NEXT:    pmovsxbd %xmm3, %xmm3
; SSE41-NEXT:    paddd %xmm2, %xmm3
; SSE41-NEXT:    pmovsxbd %xmm1, %xmm1
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    paddd %xmm3, %xmm0
; SSE41-NEXT:    paddd %xmm4, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v32i32_v32i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-SLOW-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm4, %xmm4
; AVX1-SLOW-NEXT:    vpaddd %xmm3, %xmm4, %xmm3
; AVX1-SLOW-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm4, %xmm4
; AVX1-SLOW-NEXT:    vpaddd %xmm3, %xmm4, %xmm3
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm1, %xmm1
; AVX1-SLOW-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v32i32_v32i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm4, %xmm4
; AVX1-FAST-NEXT:    vpaddd %xmm3, %xmm4, %xmm3
; AVX1-FAST-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm3, %xmm3
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbd %xmm4, %xmm4
; AVX1-FAST-NEXT:    vpaddd %xmm3, %xmm4, %xmm3
; AVX1-FAST-NEXT:    vpmovsxbd %xmm1, %xmm1
; AVX1-FAST-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v32i32_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX2-NEXT:    vpmovsxbd %xmm2, %ymm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
; AVX2-NEXT:    vpaddd %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i32_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpmovsxbd %xmm1, %zmm1
; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <32 x i8> %a0 to <32 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
  ret i32 %2
}

;
; vXi16
;

define i16 @test_v2i16_v2i8(<2 x i8> %a0) {
; SSE2-LABEL: test_v2i16_v2i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i16_v2i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v2i16_v2i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v2i16_v2i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v2i16_v2i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v2i16_v2i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = sext <2 x i8> %a0 to <2 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %1)
  ret i16 %2
}

define i16 @test_v4i16_v4i8(<4 x i8> %a0) {
; SSE2-LABEL: test_v4i16_v4i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i16_v4i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v4i16_v4i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v4i16_v4i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v4i16_v4i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i16_v4i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = sext <4 x i8> %a0 to <4 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %1)
  ret i16 %2
}

define i16 @test_v8i16_v8i8(<8 x i8> %a0) {
; SSE2-LABEL: test_v8i16_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8i16_v8i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v8i16_v8i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v8i16_v8i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v8i16_v8i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v8i16_v8i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = sext <8 x i8> %a0 to <8 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
  ret i16 %2
}

define i16 @test_v16i16_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i16_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i16_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v16i16_v16i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v16i16_v16i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v16i16_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v16i16_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <16 x i8> %a0 to <16 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
  ret i16 %2
}

define i16 @test_v32i16_v32i8(<32 x i8> %a0) {
; SSE2-LABEL: test_v32i16_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    paddw %xmm2, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    paddw %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v32i16_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm3
; SSE41-NEXT:    paddw %xmm2, %xmm3
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    paddw %xmm3, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v32i16_v32i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm3, %xmm3
; AVX1-SLOW-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v32i16_v32i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm3, %xmm3
; AVX1-FAST-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
; AVX1-FAST-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v32i16_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i16_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT:    vpmovsxbw %ymm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddw %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = sext <32 x i8> %a0 to <32 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %1)
  ret i16 %2
}

define i16 @test_v64i16_v64i8(<64 x i8> %a0) {
; SSE2-LABEL: test_v64i16_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    paddw %xmm4, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    paddw %xmm4, %xmm6
; SSE2-NEXT:    paddw %xmm5, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    paddw %xmm2, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    paddw %xmm2, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    paddw %xmm6, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v64i16_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm5
; SSE41-NEXT:    paddw %xmm4, %xmm5
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm6
; SSE41-NEXT:    paddw %xmm4, %xmm6
; SSE41-NEXT:    paddw %xmm5, %xmm6
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    paddw %xmm2, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    paddw %xmm2, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    paddw %xmm6, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v64i16_v64i8:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm1, %xmm2
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm3
; AVX1-SLOW-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm3, %xmm4
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm5, %xmm6
; AVX1-SLOW-NEXT:    vpaddw %xmm4, %xmm6, %xmm4
; AVX1-SLOW-NEXT:    vpaddw %xmm4, %xmm2, %xmm2
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
; AVX1-SLOW-NEXT:    vpmovsxbw %xmm3, %xmm3
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v64i16_v64i8:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT:    vpmovsxbw %xmm1, %xmm2
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm3
; AVX1-FAST-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-FAST-NEXT:    vpmovsxbw %xmm3, %xmm4
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-FAST-NEXT:    vpmovsxbw %xmm5, %xmm6
; AVX1-FAST-NEXT:    vpaddw %xmm4, %xmm6, %xmm4
; AVX1-FAST-NEXT:    vpaddw %xmm4, %xmm2, %xmm2
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
; AVX1-FAST-NEXT:    vpmovsxbw %xmm3, %xmm3
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm3, %xmm1
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v64i16_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm2
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm3
; AVX2-NEXT:    vpaddw %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v64i16_v64i8: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1 |
| ; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0 |
| ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 |
| ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 |
| ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = sext <64 x i8> %a0 to <64 x i16> |
| %2 = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %1) |
| ret i16 %2 |
| } |
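| |
| ; With plain SSE2 the i8->i16 sign extensions above are synthesized by |
| ; interleaving the source bytes into the high byte of each word |
| ; (punpcklbw/punpckhbw) and shifting right arithmetically by 8; SSE4.1 and |
| ; AVX use pmovsxbw directly, and AVX512 widens each 256-bit half into a |
| ; zmm so the <64 x i16> sum needs only a single vpaddw tree. |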
| |
| ; |
| ; vXi1 - sum of extended bool vectors |
| ; |
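| |
| ; Since sext(i1 true) is -1 in every element, reducing the extended mask |
| ; with vector.reduce.add yields the negated count of lanes where the |
| ; compare is true, truncated to the element width. |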
| |
| define i64 @test_v2i64_v2i1(<2 x i64> %a0) { |
| ; SSE2-LABEL: test_v2i64_v2i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: paddq %xmm1, %xmm0 |
| ; SSE2-NEXT: movq %xmm0, %rax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v2i64_v2i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE41-NEXT: paddq %xmm1, %xmm0 |
| ; SSE41-NEXT: movq %xmm0, %rax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX-LABEL: test_v2i64_v2i1: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq %xmm0, %rax |
| ; AVX-NEXT: retq |
| %1 = icmp slt <2 x i64> %a0, zeroinitializer |
| %2 = sext <2 x i1> %1 to <2 x i64> |
| %3 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %2) |
| ret i64 %3 |
| } |
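| |
| ; Note: pcmpgtq is only available from SSE4.2, so the SSE2/SSE4.1 paths |
| ; above build the 64-bit sign mask by splatting the high dwords (pshufd |
| ; [1,1,3,3]) and comparing them against zero with pcmpgtd; AVX can use |
| ; vpcmpgtq directly. |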
| |
| define i32 @test_v4i32_v4i1(<4 x i32> %a0) { |
| ; SSE2-LABEL: test_v4i32_v4i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: paddd %xmm1, %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: paddd %xmm0, %xmm1 |
| ; SSE2-NEXT: movd %xmm1, %eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v4i32_v4i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE41-NEXT: paddd %xmm1, %xmm0 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; SSE41-NEXT: paddd %xmm0, %xmm1 |
| ; SSE41-NEXT: movd %xmm1, %eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-SLOW-LABEL: test_v4i32_v4i1: |
| ; AVX1-SLOW: # %bb.0: |
| ; AVX1-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX1-SLOW-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 |
| ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX1-SLOW-NEXT: vmovd %xmm0, %eax |
| ; AVX1-SLOW-NEXT: retq |
| ; |
| ; AVX1-FAST-LABEL: test_v4i32_v4i1: |
| ; AVX1-FAST: # %bb.0: |
| ; AVX1-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX1-FAST-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 |
| ; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0 |
| ; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0 |
| ; AVX1-FAST-NEXT: vmovd %xmm0, %eax |
| ; AVX1-FAST-NEXT: retq |
| ; |
| ; AVX2-LABEL: test_v4i32_v4i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v4i32_v4i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: retq |
| %1 = icmp slt <4 x i32> %a0, zeroinitializer |
| %2 = sext <4 x i1> %1 to <4 x i32> |
| %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2) |
| ret i32 %3 |
| } |
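| |
| ; With +fast-hops the two shuffle+paddd halving steps collapse into two |
| ; vphaddd instructions; the default slow-hop path keeps the shuffle form. |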
| |
| define i16 @test_v8i16_v8i1(<8 x i16> %a0) { |
| ; SSE2-LABEL: test_v8i16_v8i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pcmpgtw %xmm0, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: paddw %xmm1, %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: paddw %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: psrld $16, %xmm0 |
| ; SSE2-NEXT: paddw %xmm1, %xmm0 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: # kill: def $ax killed $ax killed $eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v8i16_v8i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pcmpgtw %xmm0, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE41-NEXT: paddw %xmm1, %xmm0 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; SSE41-NEXT: paddw %xmm0, %xmm1 |
| ; SSE41-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE41-NEXT: psrld $16, %xmm0 |
| ; SSE41-NEXT: paddw %xmm1, %xmm0 |
| ; SSE41-NEXT: movd %xmm0, %eax |
| ; SSE41-NEXT: # kill: def $ax killed $ax killed $eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-SLOW-LABEL: test_v8i16_v8i1: |
| ; AVX1-SLOW: # %bb.0: |
| ; AVX1-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX1-SLOW-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 |
| ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1 |
| ; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX1-SLOW-NEXT: vmovd %xmm0, %eax |
| ; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX1-SLOW-NEXT: retq |
| ; |
| ; AVX1-FAST-LABEL: test_v8i16_v8i1: |
| ; AVX1-FAST: # %bb.0: |
| ; AVX1-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX1-FAST-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 |
| ; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0 |
| ; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0 |
| ; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0 |
| ; AVX1-FAST-NEXT: vmovd %xmm0, %eax |
| ; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX1-FAST-NEXT: retq |
| ; |
| ; AVX2-LABEL: test_v8i16_v8i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v8i16_v8i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: # kill: def $ax killed $ax killed $eax |
| ; AVX512-NEXT: retq |
| %1 = icmp slt <8 x i16> %a0, zeroinitializer |
| %2 = sext <8 x i1> %1 to <8 x i16> |
| %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2) |
| ret i16 %3 |
| } |
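| |
| ; An <8 x i16> reduction takes log2(8) = 3 halving steps: a qword shuffle, |
| ; a dword shuffle, and a psrld $16 to line up the final pair of words; the |
| ; fast-hop AVX1 path uses three vphaddw instead. |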
| |
| define i8 @test_v16i8_v16i1(<16 x i8> %a0) { |
| ; SSE2-LABEL: test_v16i8_v16i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pxor %xmm2, %xmm2 |
| ; SSE2-NEXT: pcmpgtb %xmm0, %xmm2 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3] |
| ; SSE2-NEXT: paddb %xmm2, %xmm0 |
| ; SSE2-NEXT: psadbw %xmm1, %xmm0 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v16i8_v16i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm2, %xmm2 |
| ; SSE41-NEXT: pcmpgtb %xmm0, %xmm2 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3] |
| ; SSE41-NEXT: paddb %xmm2, %xmm0 |
| ; SSE41-NEXT: psadbw %xmm1, %xmm0 |
| ; SSE41-NEXT: movd %xmm0, %eax |
| ; SSE41-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX-LABEL: test_v16i8_v16i1: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] |
| ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 |
| ; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX-NEXT: retq |
| %1 = icmp slt <16 x i8> %a0, zeroinitializer |
| %2 = sext <16 x i1> %1 to <16 x i8> |
| %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2) |
| ret i8 %3 |
| } |
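| |
| ; Byte reductions lean on psadbw against zero, which sums eight unsigned |
| ; bytes into one qword: each 0xff mask byte contributes 255 = -1 (mod 256), |
| ; so the low byte of the SAD result is exactly the reduced i8 sum. |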
| |
| define i8 @test_v32i8_v32i1(<32 x i8> %a0) { |
| ; SSE2-LABEL: test_v32i8_v32i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm2, %xmm2 |
| ; SSE2-NEXT: pxor %xmm3, %xmm3 |
| ; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pcmpgtb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb %xmm3, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: paddb %xmm1, %xmm0 |
| ; SSE2-NEXT: psadbw %xmm2, %xmm0 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v32i8_v32i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm2, %xmm2 |
| ; SSE41-NEXT: pxor %xmm3, %xmm3 |
| ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pcmpgtb %xmm0, %xmm1 |
| ; SSE41-NEXT: paddb %xmm3, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE41-NEXT: paddb %xmm1, %xmm0 |
| ; SSE41-NEXT: psadbw %xmm2, %xmm0 |
| ; SSE41-NEXT: movd %xmm0, %eax |
| ; SSE41-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: test_v32i8_v32i1: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 |
| ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovd %xmm0, %eax |
| ; AVX1-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: test_v32i8_v32i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v32i8_v32i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0 |
| ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = icmp slt <32 x i8> %a0, zeroinitializer |
| %2 = sext <32 x i1> %1 to <32 x i8> |
| %3 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %2) |
| ret i8 %3 |
| } |
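| |
| ; For 256-bit inputs the two mask halves are added down to one xmm first, |
| ; so a single psadbw still finishes the reduction. |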
| |
| define i8 @test_v64i8_v64i1(<64 x i8> %a0) { |
| ; SSE2-LABEL: test_v64i8_v64i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm4, %xmm4 |
| ; SSE2-NEXT: pxor %xmm5, %xmm5 |
| ; SSE2-NEXT: pcmpgtb %xmm2, %xmm5 |
| ; SSE2-NEXT: pxor %xmm2, %xmm2 |
| ; SSE2-NEXT: pcmpgtb %xmm0, %xmm2 |
| ; SSE2-NEXT: paddb %xmm5, %xmm2 |
| ; SSE2-NEXT: pxor %xmm0, %xmm0 |
| ; SSE2-NEXT: pcmpgtb %xmm3, %xmm0 |
| ; SSE2-NEXT: pxor %xmm3, %xmm3 |
| ; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb %xmm2, %xmm3 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3] |
| ; SSE2-NEXT: paddb %xmm3, %xmm0 |
| ; SSE2-NEXT: psadbw %xmm4, %xmm0 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v64i8_v64i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm4, %xmm4 |
| ; SSE41-NEXT: pxor %xmm5, %xmm5 |
| ; SSE41-NEXT: pcmpgtb %xmm2, %xmm5 |
| ; SSE41-NEXT: pxor %xmm2, %xmm2 |
| ; SSE41-NEXT: pcmpgtb %xmm0, %xmm2 |
| ; SSE41-NEXT: paddb %xmm5, %xmm2 |
| ; SSE41-NEXT: pxor %xmm0, %xmm0 |
| ; SSE41-NEXT: pcmpgtb %xmm3, %xmm0 |
| ; SSE41-NEXT: pxor %xmm3, %xmm3 |
| ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 |
| ; SSE41-NEXT: paddb %xmm0, %xmm3 |
| ; SSE41-NEXT: paddb %xmm2, %xmm3 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3] |
| ; SSE41-NEXT: paddb %xmm3, %xmm0 |
| ; SSE41-NEXT: psadbw %xmm4, %xmm0 |
| ; SSE41-NEXT: movd %xmm0, %eax |
| ; SSE41-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: test_v64i8_v64i1: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm3 |
| ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm4 |
| ; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3 |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 |
| ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovd %xmm0, %eax |
| ; AVX1-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: test_v64i8_v64i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpcmpgtb %ymm1, %ymm2, %ymm1 |
| ; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v64i8_v64i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpmovb2m %zmm0, %k0 |
| ; AVX512-NEXT: vpmovm2b %k0, %zmm0 |
| ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0 |
| ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = icmp slt <64 x i8> %a0, zeroinitializer |
| %2 = sext <64 x i1> %1 to <64 x i8> |
| %3 = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %2) |
| ret i8 %3 |
| } |
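| |
| ; With AVX512BW the compare becomes a vpmovb2m into a mask register, and |
| ; vpmovm2b rematerializes the 0/-1 byte mask in a zmm before the add tree. |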
| |
| define i8 @test_v128i8_v128i1(<128 x i8> %a0) { |
| ; SSE2-LABEL: test_v128i8_v128i1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pxor %xmm8, %xmm8 |
| ; SSE2-NEXT: pxor %xmm9, %xmm9 |
| ; SSE2-NEXT: pcmpgtb %xmm4, %xmm9 |
| ; SSE2-NEXT: pxor %xmm4, %xmm4 |
| ; SSE2-NEXT: pcmpgtb %xmm0, %xmm4 |
| ; SSE2-NEXT: paddb %xmm9, %xmm4 |
| ; SSE2-NEXT: pxor %xmm0, %xmm0 |
| ; SSE2-NEXT: pcmpgtb %xmm6, %xmm0 |
| ; SSE2-NEXT: pxor %xmm6, %xmm6 |
| ; SSE2-NEXT: pcmpgtb %xmm2, %xmm6 |
| ; SSE2-NEXT: paddb %xmm0, %xmm6 |
| ; SSE2-NEXT: paddb %xmm4, %xmm6 |
| ; SSE2-NEXT: pxor %xmm0, %xmm0 |
| ; SSE2-NEXT: pcmpgtb %xmm5, %xmm0 |
| ; SSE2-NEXT: pxor %xmm2, %xmm2 |
| ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: pxor %xmm0, %xmm0 |
| ; SSE2-NEXT: pcmpgtb %xmm7, %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pcmpgtb %xmm3, %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb %xmm2, %xmm1 |
| ; SSE2-NEXT: paddb %xmm6, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: paddb %xmm1, %xmm0 |
| ; SSE2-NEXT: psadbw %xmm8, %xmm0 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE41-LABEL: test_v128i8_v128i1: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm8, %xmm8 |
| ; SSE41-NEXT: pxor %xmm9, %xmm9 |
| ; SSE41-NEXT: pcmpgtb %xmm4, %xmm9 |
| ; SSE41-NEXT: pxor %xmm4, %xmm4 |
| ; SSE41-NEXT: pcmpgtb %xmm0, %xmm4 |
| ; SSE41-NEXT: paddb %xmm9, %xmm4 |
| ; SSE41-NEXT: pxor %xmm0, %xmm0 |
| ; SSE41-NEXT: pcmpgtb %xmm6, %xmm0 |
| ; SSE41-NEXT: pxor %xmm6, %xmm6 |
| ; SSE41-NEXT: pcmpgtb %xmm2, %xmm6 |
| ; SSE41-NEXT: paddb %xmm0, %xmm6 |
| ; SSE41-NEXT: paddb %xmm4, %xmm6 |
| ; SSE41-NEXT: pxor %xmm0, %xmm0 |
| ; SSE41-NEXT: pcmpgtb %xmm5, %xmm0 |
| ; SSE41-NEXT: pxor %xmm2, %xmm2 |
| ; SSE41-NEXT: pcmpgtb %xmm1, %xmm2 |
| ; SSE41-NEXT: paddb %xmm0, %xmm2 |
| ; SSE41-NEXT: pxor %xmm0, %xmm0 |
| ; SSE41-NEXT: pcmpgtb %xmm7, %xmm0 |
| ; SSE41-NEXT: pxor %xmm1, %xmm1 |
| ; SSE41-NEXT: pcmpgtb %xmm3, %xmm1 |
| ; SSE41-NEXT: paddb %xmm0, %xmm1 |
| ; SSE41-NEXT: paddb %xmm2, %xmm1 |
| ; SSE41-NEXT: paddb %xmm6, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] |
| ; SSE41-NEXT: paddb %xmm1, %xmm0 |
| ; SSE41-NEXT: psadbw %xmm8, %xmm0 |
| ; SSE41-NEXT: movd %xmm0, %eax |
| ; SSE41-NEXT: # kill: def $al killed $al killed $eax |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: test_v128i8_v128i1: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4 |
| ; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm5 |
| ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm4, %xmm6 |
| ; AVX1-NEXT: vpaddb %xmm5, %xmm6, %xmm5 |
| ; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm6 |
| ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm4, %xmm7 |
| ; AVX1-NEXT: vpaddb %xmm6, %xmm7, %xmm6 |
| ; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm5 |
| ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2 |
| ; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm4, %xmm0 |
| ; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 |
| ; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 |
| ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm4, %xmm1 |
| ; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1 |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpaddb %xmm0, %xmm5, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpsadbw %xmm4, %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovd %xmm0, %eax |
| ; AVX1-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX1-NEXT: vzeroupper |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: test_v128i8_v128i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpcmpgtb %ymm2, %ymm4, %ymm2 |
| ; AVX2-NEXT: vpcmpgtb %ymm0, %ymm4, %ymm0 |
| ; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpcmpgtb %ymm3, %ymm4, %ymm2 |
| ; AVX2-NEXT: vpcmpgtb %ymm1, %ymm4, %ymm1 |
| ; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %eax |
| ; AVX2-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: test_v128i8_v128i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpmovb2m %zmm0, %k0 |
| ; AVX512-NEXT: vpmovb2m %zmm1, %k1 |
| ; AVX512-NEXT: vpmovm2b %k1, %zmm0 |
| ; AVX512-NEXT: vpmovm2b %k0, %zmm1 |
| ; AVX512-NEXT: vpaddb %zmm0, %zmm1, %zmm0 |
| ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0 |
| ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovd %xmm0, %eax |
| ; AVX512-NEXT: # kill: def $al killed $al killed $eax |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %1 = icmp slt <128 x i8> %a0, zeroinitializer |
| %2 = sext <128 x i1> %1 to <128 x i8> |
| %3 = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %2) |
| ret i8 %3 |
| } |
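| |
| ; At 128 bytes the AVX512 path adds the two rematerialized zmm masks before |
| ; narrowing, while the AVX1/AVX2 paths fold the compares pairwise at their |
| ; native vector width. |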
| |
| declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>) |
| |
| declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>) |
| |
| declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>) |
| declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>) |
| |
| declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>) |
| declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>) |
| declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>) |