| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512 |
| |
| declare {<1 x i32>, <1 x i1>} @llvm.usub.with.overflow.v1i32(<1 x i32>, <1 x i32>) |
| declare {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32>, <2 x i32>) |
| declare {<3 x i32>, <3 x i1>} @llvm.usub.with.overflow.v3i32(<3 x i32>, <3 x i32>) |
| declare {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32>, <4 x i32>) |
| declare {<6 x i32>, <6 x i1>} @llvm.usub.with.overflow.v6i32(<6 x i32>, <6 x i32>) |
| declare {<8 x i32>, <8 x i1>} @llvm.usub.with.overflow.v8i32(<8 x i32>, <8 x i32>) |
| declare {<16 x i32>, <16 x i1>} @llvm.usub.with.overflow.v16i32(<16 x i32>, <16 x i32>) |
| |
| declare {<16 x i8>, <16 x i1>} @llvm.usub.with.overflow.v16i8(<16 x i8>, <16 x i8>) |
| declare {<8 x i16>, <8 x i1>} @llvm.usub.with.overflow.v8i16(<8 x i16>, <8 x i16>) |
| declare {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64>, <2 x i64>) |
| |
| declare {<4 x i24>, <4 x i1>} @llvm.usub.with.overflow.v4i24(<4 x i24>, <4 x i24>) |
| declare {<4 x i1>, <4 x i1>} @llvm.usub.with.overflow.v4i1(<4 x i1>, <4 x i1>) |
| declare {<2 x i128>, <2 x i1>} @llvm.usub.with.overflow.v2i128(<2 x i128>, <2 x i128>) |
| |
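| ; The <1 x i32> case is scalarized: subl/sbbl compute the difference and materialize the borrow as an all-ones/zero mask in %eax. |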
| define <1 x i32> @usubo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind { |
| ; SSE-LABEL: usubo_v1i32: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subl %esi, %edi |
| ; SSE-NEXT: sbbl %eax, %eax |
| ; SSE-NEXT: movl %edi, (%rdx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: usubo_v1i32: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: subl %esi, %edi |
| ; AVX-NEXT: sbbl %eax, %eax |
| ; AVX-NEXT: movl %edi, (%rdx) |
| ; AVX-NEXT: retq |
| %t = call {<1 x i32>, <1 x i1>} @llvm.usub.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1) |
| %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0 |
| %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1 |
| %res = sext <1 x i1> %obit to <1 x i32> |
| store <1 x i32> %val, <1 x i32>* %p2 |
| ret <1 x i32> %res |
| } |
| |
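| ; The <2 x i32> case is widened to <2 x i64>: the SSE/AVX paths zero the upper halves, subtract with psubq, and detect |
| ; a borrow by comparing the re-masked difference with the full 64-bit difference (then inverting); AVX512 inverts a |
| ; vpcmpeqq result via vpternlogq and narrows the stored value with vpmovqd. |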
| define <2 x i32> @usubo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v2i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pand %xmm2, %xmm0 |
| ; SSE2-NEXT: psubq %xmm1, %xmm0 |
| ; SSE2-NEXT: pand %xmm0, %xmm2 |
| ; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2] |
| ; SSE2-NEXT: pand %xmm2, %xmm3 |
| ; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE2-NEXT: pxor %xmm3, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE2-NEXT: movq %xmm0, (%rdi) |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v2i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0] |
| ; SSSE3-NEXT: pand %xmm2, %xmm1 |
| ; SSSE3-NEXT: pand %xmm2, %xmm0 |
| ; SSSE3-NEXT: psubq %xmm1, %xmm0 |
| ; SSSE3-NEXT: pand %xmm0, %xmm2 |
| ; SSSE3-NEXT: pcmpeqd %xmm0, %xmm2 |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2] |
| ; SSSE3-NEXT: pand %xmm2, %xmm3 |
| ; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSSE3-NEXT: pxor %xmm3, %xmm1 |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSSE3-NEXT: movq %xmm0, (%rdi) |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm0 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v2i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: pxor %xmm2, %xmm2 |
| ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] |
| ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] |
| ; SSE41-NEXT: psubq %xmm1, %xmm0 |
| ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] |
| ; SSE41-NEXT: pcmpeqq %xmm0, %xmm2 |
| ; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm2, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE41-NEXT: movq %xmm0, (%rdi) |
| ; SSE41-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v2i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] |
| ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] |
| ; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; AVX1-NEXT: vmovq %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v2i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] |
| ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; AVX2-NEXT: vmovq %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v2i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] |
| ; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] |
| ; AVX512-NEXT: vpmovqd %xmm0, (%rdi) |
| ; AVX512-NEXT: vpcmpeqq %xmm0, %xmm1, %xmm0 |
| ; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: retq |
| %t = call {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1) |
| %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0 |
| %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1 |
| %res = sext <2 x i1> %obit to <2 x i32> |
| store <2 x i32> %val, <2 x i32>* %p2 |
| ret <2 x i32> %res |
| } |
| |
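| ; Lacking unsigned vector compares, SSE2/SSSE3 flip the sign bits and use signed pcmpgtd (difference > input means a |
| ; borrow); SSE41 uses pminud/pcmpeqd, and AVX512 compares directly with vpcmpnleud into a mask register. The 12-byte |
| ; result is stored as an 8-byte piece plus a 4-byte element. |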
| define <3 x i32> @usubo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v3i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: pxor %xmm2, %xmm3 |
| ; SSE2-NEXT: psubd %xmm1, %xmm0 |
| ; SSE2-NEXT: pxor %xmm0, %xmm2 |
| ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2 |
| ; SSE2-NEXT: movq %xmm0, (%rdi) |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] |
| ; SSE2-NEXT: movd %xmm0, 8(%rdi) |
| ; SSE2-NEXT: movdqa %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v3i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm3 |
| ; SSSE3-NEXT: pxor %xmm2, %xmm3 |
| ; SSSE3-NEXT: psubd %xmm1, %xmm0 |
| ; SSSE3-NEXT: pxor %xmm0, %xmm2 |
| ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm2 |
| ; SSSE3-NEXT: movq %xmm0, (%rdi) |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] |
| ; SSSE3-NEXT: movd %xmm0, 8(%rdi) |
| ; SSSE3-NEXT: movdqa %xmm2, %xmm0 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v3i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE41-NEXT: psubd %xmm1, %xmm2 |
| ; SSE41-NEXT: pminud %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm1, %xmm0 |
| ; SSE41-NEXT: pextrd $2, %xmm2, 8(%rdi) |
| ; SSE41-NEXT: movq %xmm2, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v3i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpextrd $2, %xmm1, 8(%rdi) |
| ; AVX1-NEXT: vmovq %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v3i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $2, %xmm1, 8(%rdi) |
| ; AVX2-NEXT: vmovq %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v3i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpcmpnleud %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} |
| ; AVX512-NEXT: vpextrd $2, %xmm1, 8(%rdi) |
| ; AVX512-NEXT: vmovq %xmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<3 x i32>, <3 x i1>} @llvm.usub.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1) |
| %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0 |
| %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1 |
| %res = sext <3 x i1> %obit to <3 x i32> |
| store <3 x i32> %val, <3 x i32>* %p2 |
| ret <3 x i32> %res |
| } |
| |
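| ; Same overflow idioms as the v3i32 case (sign-bit bias on SSE2/SSSE3, pminud/pcmpeqd on SSE41/AVX, vpcmpnleud on |
| ; AVX512), but with a single full-width store of the difference. |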
| define <4 x i32> @usubo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v4i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: pxor %xmm2, %xmm3 |
| ; SSE2-NEXT: psubd %xmm1, %xmm0 |
| ; SSE2-NEXT: pxor %xmm0, %xmm2 |
| ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE2-NEXT: movdqa %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v4i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm3 |
| ; SSSE3-NEXT: pxor %xmm2, %xmm3 |
| ; SSSE3-NEXT: psubd %xmm1, %xmm0 |
| ; SSSE3-NEXT: pxor %xmm0, %xmm2 |
| ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm2 |
| ; SSSE3-NEXT: movdqa %xmm0, (%rdi) |
| ; SSSE3-NEXT: movdqa %xmm2, %xmm0 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v4i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE41-NEXT: psubd %xmm1, %xmm2 |
| ; SSE41-NEXT: pminud %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm1, %xmm0 |
| ; SSE41-NEXT: movdqa %xmm2, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v4i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v4i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v4i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpcmpnleud %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} |
| ; AVX512-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1) |
| %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0 |
| %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1 |
| %res = sext <4 x i1> %obit to <4 x i32> |
| store <4 x i32> %val, <4 x i32>* %p2 |
| ret <4 x i32> %res |
| } |
| |
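| ; <6 x i32> is an illegal element count: the SSE paths rebuild the vectors from GPR/stack arguments and return the |
| ; sign-extended mask through the sret pointer, storing the difference as 16 + 8 bytes; AVX2/AVX512 work on one ymm |
| ; and extract the upper half for the partial store. |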
| define <6 x i32> @usubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v6i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %rdi, %rax |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE2-NEXT: movd %r8d, %xmm0 |
| ; SSE2-NEXT: movd %ecx, %xmm1 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSE2-NEXT: movd %edx, %xmm3 |
| ; SSE2-NEXT: movd %esi, %xmm0 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; SSE2-NEXT: movd %r9d, %xmm1 |
| ; SSE2-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE2-NEXT: psubd %xmm2, %xmm4 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE2-NEXT: movdqa %xmm4, (%rcx) |
| ; SSE2-NEXT: pxor %xmm2, %xmm4 |
| ; SSE2-NEXT: pxor %xmm2, %xmm0 |
| ; SSE2-NEXT: pcmpgtd %xmm0, %xmm4 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: psubd %xmm3, %xmm0 |
| ; SSE2-NEXT: movq %xmm0, 16(%rcx) |
| ; SSE2-NEXT: pxor %xmm2, %xmm0 |
| ; SSE2-NEXT: pxor %xmm2, %xmm1 |
| ; SSE2-NEXT: pcmpgtd %xmm1, %xmm0 |
| ; SSE2-NEXT: movq %xmm0, 16(%rdi) |
| ; SSE2-NEXT: movdqa %xmm4, (%rdi) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v6i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movq %rdi, %rax |
| ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSSE3-NEXT: movd %r8d, %xmm0 |
| ; SSSE3-NEXT: movd %ecx, %xmm1 |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSSE3-NEXT: movd %edx, %xmm3 |
| ; SSSE3-NEXT: movd %esi, %xmm0 |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; SSSE3-NEXT: movd %r9d, %xmm1 |
| ; SSSE3-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero |
| ; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] |
| ; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm4 |
| ; SSSE3-NEXT: psubd %xmm2, %xmm4 |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSSE3-NEXT: movdqa %xmm4, (%rcx) |
| ; SSSE3-NEXT: pxor %xmm2, %xmm4 |
| ; SSSE3-NEXT: pxor %xmm2, %xmm0 |
| ; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4 |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm0 |
| ; SSSE3-NEXT: psubd %xmm3, %xmm0 |
| ; SSSE3-NEXT: movq %xmm0, 16(%rcx) |
| ; SSSE3-NEXT: pxor %xmm2, %xmm0 |
| ; SSSE3-NEXT: pxor %xmm2, %xmm1 |
| ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 |
| ; SSSE3-NEXT: movq %xmm0, 16(%rdi) |
| ; SSSE3-NEXT: movdqa %xmm4, (%rdi) |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v6i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movq %rdi, %rax |
| ; SSE41-NEXT: movd %esi, %xmm0 |
| ; SSE41-NEXT: pinsrd $1, %edx, %xmm0 |
| ; SSE41-NEXT: pinsrd $2, %ecx, %xmm0 |
| ; SSE41-NEXT: pinsrd $3, %r8d, %xmm0 |
| ; SSE41-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm1 |
| ; SSE41-NEXT: movd %r9d, %xmm2 |
| ; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm2 |
| ; SSE41-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero |
| ; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm3 |
| ; SSE41-NEXT: pinsrd $2, {{[0-9]+}}(%rsp), %xmm3 |
| ; SSE41-NEXT: pinsrd $3, {{[0-9]+}}(%rsp), %xmm3 |
| ; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE41-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE41-NEXT: psubd %xmm3, %xmm4 |
| ; SSE41-NEXT: pminud %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 |
| ; SSE41-NEXT: pxor %xmm3, %xmm0 |
| ; SSE41-NEXT: movdqa %xmm2, %xmm5 |
| ; SSE41-NEXT: psubd %xmm1, %xmm5 |
| ; SSE41-NEXT: pminud %xmm5, %xmm2 |
| ; SSE41-NEXT: pcmpeqd %xmm5, %xmm2 |
| ; SSE41-NEXT: pxor %xmm3, %xmm2 |
| ; SSE41-NEXT: movq %xmm5, 16(%rcx) |
| ; SSE41-NEXT: movdqa %xmm4, (%rcx) |
| ; SSE41-NEXT: movq %xmm2, 16(%rdi) |
| ; SSE41-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v6i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 |
| ; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2 |
| ; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm3 |
| ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm3 |
| ; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 |
| ; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3 |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 |
| ; AVX1-NEXT: vmovq %xmm2, 16(%rdi) |
| ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v6i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm1 |
| ; AVX2-NEXT: vpminud %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 |
| ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 |
| ; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 |
| ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) |
| ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v6i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm1 |
| ; AVX512-NEXT: vpcmpnleud %ymm0, %ymm1, %k1 |
| ; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} |
| ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 |
| ; AVX512-NEXT: vmovq %xmm2, 16(%rdi) |
| ; AVX512-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<6 x i32>, <6 x i1>} @llvm.usub.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1) |
| %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0 |
| %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1 |
| %res = sext <6 x i1> %obit to <6 x i32> |
| store <6 x i32> %val, <6 x i32>* %p2 |
| ret <6 x i32> %res |
| } |
| |
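| ; 256-bit case: AVX1 splits into two 128-bit halves and repacks the overflow mask with packssdw/pmovsxwd, while AVX2 |
| ; and AVX512 use single ymm operations. |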
| define <8 x i32> @usubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v8i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm5 |
| ; SSE2-NEXT: pxor %xmm4, %xmm5 |
| ; SSE2-NEXT: psubd %xmm2, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE2-NEXT: pxor %xmm4, %xmm0 |
| ; SSE2-NEXT: pcmpgtd %xmm5, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE2-NEXT: pxor %xmm4, %xmm2 |
| ; SSE2-NEXT: psubd %xmm3, %xmm1 |
| ; SSE2-NEXT: pxor %xmm1, %xmm4 |
| ; SSE2-NEXT: pcmpgtd %xmm2, %xmm4 |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rdi) |
| ; SSE2-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v8i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm5 |
| ; SSSE3-NEXT: pxor %xmm4, %xmm5 |
| ; SSSE3-NEXT: psubd %xmm2, %xmm0 |
| ; SSSE3-NEXT: movdqa %xmm0, (%rdi) |
| ; SSSE3-NEXT: pxor %xmm4, %xmm0 |
| ; SSSE3-NEXT: pcmpgtd %xmm5, %xmm0 |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm2 |
| ; SSSE3-NEXT: pxor %xmm4, %xmm2 |
| ; SSSE3-NEXT: psubd %xmm3, %xmm1 |
| ; SSSE3-NEXT: pxor %xmm1, %xmm4 |
| ; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4 |
| ; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) |
| ; SSSE3-NEXT: movdqa %xmm4, %xmm1 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v8i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE41-NEXT: psubd %xmm2, %xmm4 |
| ; SSE41-NEXT: pminud %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 |
| ; SSE41-NEXT: pxor %xmm2, %xmm0 |
| ; SSE41-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE41-NEXT: psubd %xmm3, %xmm5 |
| ; SSE41-NEXT: pminud %xmm5, %xmm1 |
| ; SSE41-NEXT: pcmpeqd %xmm5, %xmm1 |
| ; SSE41-NEXT: pxor %xmm2, %xmm1 |
| ; SSE41-NEXT: movdqa %xmm5, 16(%rdi) |
| ; SSE41-NEXT: movdqa %xmm4, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v8i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 |
| ; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2 |
| ; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm3 |
| ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm3 |
| ; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 |
| ; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3 |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 |
| ; AVX1-NEXT: vmovaps %ymm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v8i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm1 |
| ; AVX2-NEXT: vpminud %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 |
| ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 |
| ; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v8i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm1 |
| ; AVX512-NEXT: vpcmpnleud %ymm0, %ymm1, %k1 |
| ; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} |
| ; AVX512-NEXT: vmovdqa %ymm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<8 x i32>, <8 x i1>} @llvm.usub.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1) |
| %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0 |
| %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1 |
| %res = sext <8 x i1> %obit to <8 x i32> |
| store <8 x i32> %val, <8 x i32>* %p2 |
| ret <8 x i32> %res |
| } |
| |
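| ; 512-bit case: SSE and AVX1/AVX2 work in 128/256-bit pieces and narrow the i1 results with packssdw/packsswb before |
| ; re-extending; AVX512 subtracts in one zmm, compares with vpcmpnleud into k1, and builds the mask with vpternlogd. |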
| define <16 x i32> @usubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v16i32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm9 |
| ; SSE2-NEXT: pxor %xmm8, %xmm9 |
| ; SSE2-NEXT: psubd %xmm4, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE2-NEXT: pxor %xmm8, %xmm0 |
| ; SSE2-NEXT: pcmpgtd %xmm9, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE2-NEXT: pxor %xmm8, %xmm4 |
| ; SSE2-NEXT: psubd %xmm5, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rdi) |
| ; SSE2-NEXT: pxor %xmm8, %xmm1 |
| ; SSE2-NEXT: pcmpgtd %xmm4, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm2, %xmm4 |
| ; SSE2-NEXT: pxor %xmm8, %xmm4 |
| ; SSE2-NEXT: psubd %xmm6, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, 32(%rdi) |
| ; SSE2-NEXT: pxor %xmm8, %xmm2 |
| ; SSE2-NEXT: pcmpgtd %xmm4, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm3, %xmm4 |
| ; SSE2-NEXT: pxor %xmm8, %xmm4 |
| ; SSE2-NEXT: psubd %xmm7, %xmm3 |
| ; SSE2-NEXT: pxor %xmm3, %xmm8 |
| ; SSE2-NEXT: pcmpgtd %xmm4, %xmm8 |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rdi) |
| ; SSE2-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v16i32: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm9 |
| ; SSSE3-NEXT: pxor %xmm8, %xmm9 |
| ; SSSE3-NEXT: psubd %xmm4, %xmm0 |
| ; SSSE3-NEXT: movdqa %xmm0, (%rdi) |
| ; SSSE3-NEXT: pxor %xmm8, %xmm0 |
| ; SSSE3-NEXT: pcmpgtd %xmm9, %xmm0 |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm4 |
| ; SSSE3-NEXT: pxor %xmm8, %xmm4 |
| ; SSSE3-NEXT: psubd %xmm5, %xmm1 |
| ; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) |
| ; SSSE3-NEXT: pxor %xmm8, %xmm1 |
| ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm1 |
| ; SSSE3-NEXT: movdqa %xmm2, %xmm4 |
| ; SSSE3-NEXT: pxor %xmm8, %xmm4 |
| ; SSSE3-NEXT: psubd %xmm6, %xmm2 |
| ; SSSE3-NEXT: movdqa %xmm2, 32(%rdi) |
| ; SSSE3-NEXT: pxor %xmm8, %xmm2 |
| ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm2 |
| ; SSSE3-NEXT: movdqa %xmm3, %xmm4 |
| ; SSSE3-NEXT: pxor %xmm8, %xmm4 |
| ; SSSE3-NEXT: psubd %xmm7, %xmm3 |
| ; SSSE3-NEXT: pxor %xmm3, %xmm8 |
| ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm8 |
| ; SSSE3-NEXT: movdqa %xmm3, 48(%rdi) |
| ; SSSE3-NEXT: movdqa %xmm8, %xmm3 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v16i32: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm8 |
| ; SSE41-NEXT: psubd %xmm4, %xmm8 |
| ; SSE41-NEXT: pminud %xmm8, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm8, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm9, %xmm9 |
| ; SSE41-NEXT: pxor %xmm9, %xmm0 |
| ; SSE41-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE41-NEXT: psubd %xmm5, %xmm4 |
| ; SSE41-NEXT: pminud %xmm4, %xmm1 |
| ; SSE41-NEXT: pcmpeqd %xmm4, %xmm1 |
| ; SSE41-NEXT: pxor %xmm9, %xmm1 |
| ; SSE41-NEXT: movdqa %xmm2, %xmm5 |
| ; SSE41-NEXT: psubd %xmm6, %xmm5 |
| ; SSE41-NEXT: pminud %xmm5, %xmm2 |
| ; SSE41-NEXT: pcmpeqd %xmm5, %xmm2 |
| ; SSE41-NEXT: pxor %xmm9, %xmm2 |
| ; SSE41-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE41-NEXT: psubd %xmm7, %xmm6 |
| ; SSE41-NEXT: pminud %xmm6, %xmm3 |
| ; SSE41-NEXT: pcmpeqd %xmm6, %xmm3 |
| ; SSE41-NEXT: pxor %xmm9, %xmm3 |
| ; SSE41-NEXT: movdqa %xmm6, 48(%rdi) |
| ; SSE41-NEXT: movdqa %xmm5, 32(%rdi) |
| ; SSE41-NEXT: movdqa %xmm4, 16(%rdi) |
| ; SSE41-NEXT: movdqa %xmm8, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v16i32: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 |
| ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5 |
| ; AVX1-NEXT: vpsubd %xmm4, %xmm5, %xmm4 |
| ; AVX1-NEXT: vpminud %xmm5, %xmm4, %xmm5 |
| ; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm5 |
| ; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 |
| ; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 |
| ; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm3 |
| ; AVX1-NEXT: vpminud %xmm1, %xmm3, %xmm1 |
| ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm3, %xmm1 |
| ; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1 |
| ; AVX1-NEXT: vpackssdw %xmm5, %xmm1, %xmm1 |
| ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5 |
| ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7 |
| ; AVX1-NEXT: vpsubd %xmm5, %xmm7, %xmm5 |
| ; AVX1-NEXT: vpminud %xmm7, %xmm5, %xmm7 |
| ; AVX1-NEXT: vpcmpeqd %xmm7, %xmm5, %xmm7 |
| ; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7 |
| ; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm2 |
| ; AVX1-NEXT: vpminud %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpackssdw %xmm7, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 |
| ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3] |
| ; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] |
| ; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1] |
| ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 |
| ; AVX1-NEXT: vmovaps %ymm3, 32(%rdi) |
| ; AVX1-NEXT: vmovaps %ymm2, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v16i32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm3 |
| ; AVX2-NEXT: vpminud %ymm1, %ymm3, %ymm1 |
| ; AVX2-NEXT: vpcmpeqd %ymm1, %ymm3, %ymm1 |
| ; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 |
| ; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1 |
| ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5 |
| ; AVX2-NEXT: vpackssdw %xmm5, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm2 |
| ; AVX2-NEXT: vpminud %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4 |
| ; AVX2-NEXT: vpackssdw %xmm4, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpmovsxbd %xmm1, %ymm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] |
| ; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa %ymm3, 32(%rdi) |
| ; AVX2-NEXT: vmovdqa %ymm2, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v16i32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm1 |
| ; AVX512-NEXT: vpcmpnleud %zmm0, %zmm1, %k1 |
| ; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} |
| ; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<16 x i32>, <16 x i1>} @llvm.usub.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1) |
| %val = extractvalue {<16 x i32>, <16 x i1>} %t, 0 |
| %obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1 |
| %res = sext <16 x i1> %obit to <16 x i32> |
| store <16 x i32> %val, <16 x i32>* %p2 |
| ret <16 x i32> %res |
| } |
| |
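| ; Byte elements: overflow comes from pminub/pcmpeqb plus an inversion, and the <16 x i1> mask is sign-extended to |
| ; <16 x i32> across four xmm (SSE), two ymm (AVX1/AVX2), or one zmm via a mask register (AVX512). |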
| define <16 x i32> @usubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v16i8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE2-NEXT: psubb %xmm1, %xmm4 |
| ; SSE2-NEXT: pminub %xmm4, %xmm0 |
| ; SSE2-NEXT: pcmpeqb %xmm4, %xmm0 |
| ; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 |
| ; SSE2-NEXT: pxor %xmm0, %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] |
| ; SSE2-NEXT: pslld $31, %xmm0 |
| ; SSE2-NEXT: psrad $31, %xmm0 |
| ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] |
| ; SSE2-NEXT: pslld $31, %xmm1 |
| ; SSE2-NEXT: psrad $31, %xmm1 |
| ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] |
| ; SSE2-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] |
| ; SSE2-NEXT: pslld $31, %xmm2 |
| ; SSE2-NEXT: psrad $31, %xmm2 |
| ; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] |
| ; SSE2-NEXT: pslld $31, %xmm3 |
| ; SSE2-NEXT: psrad $31, %xmm3 |
| ; SSE2-NEXT: movdqa %xmm4, (%rdi) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v16i8: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm4 |
| ; SSSE3-NEXT: psubb %xmm1, %xmm4 |
| ; SSSE3-NEXT: pminub %xmm4, %xmm0 |
| ; SSSE3-NEXT: pcmpeqb %xmm4, %xmm0 |
| ; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 |
| ; SSSE3-NEXT: pxor %xmm0, %xmm3 |
| ; SSSE3-NEXT: movdqa %xmm3, %xmm1 |
| ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm0 |
| ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] |
| ; SSSE3-NEXT: pslld $31, %xmm0 |
| ; SSSE3-NEXT: psrad $31, %xmm0 |
| ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] |
| ; SSSE3-NEXT: pslld $31, %xmm1 |
| ; SSSE3-NEXT: psrad $31, %xmm1 |
| ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] |
| ; SSSE3-NEXT: movdqa %xmm3, %xmm2 |
| ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] |
| ; SSSE3-NEXT: pslld $31, %xmm2 |
| ; SSSE3-NEXT: psrad $31, %xmm2 |
| ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] |
| ; SSSE3-NEXT: pslld $31, %xmm3 |
| ; SSSE3-NEXT: psrad $31, %xmm3 |
| ; SSSE3-NEXT: movdqa %xmm4, (%rdi) |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v16i8: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE41-NEXT: psubb %xmm1, %xmm4 |
| ; SSE41-NEXT: pminub %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqb %xmm4, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 |
| ; SSE41-NEXT: pxor %xmm0, %xmm3 |
| ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero |
| ; SSE41-NEXT: pslld $31, %xmm0 |
| ; SSE41-NEXT: psrad $31, %xmm0 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3] |
| ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; SSE41-NEXT: pslld $31, %xmm1 |
| ; SSE41-NEXT: psrad $31, %xmm1 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] |
| ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero |
| ; SSE41-NEXT: pslld $31, %xmm2 |
| ; SSE41-NEXT: psrad $31, %xmm2 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero |
| ; SSE41-NEXT: pslld $31, %xmm3 |
| ; SSE41-NEXT: psrad $31, %xmm3 |
| ; SSE41-NEXT: movdqa %xmm4, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v16i8: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm2 |
| ; AVX1-NEXT: vpminub %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 |
| ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3] |
| ; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] |
| ; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1] |
| ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 |
| ; AVX1-NEXT: vmovdqa %xmm2, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v16i8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm2 |
| ; AVX2-NEXT: vpminub %xmm0, %xmm2, %xmm0 |
| ; AVX2-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpmovsxbd %xmm1, %ymm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] |
| ; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa %xmm2, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v16i8: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpcmpnleub %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} |
| ; AVX512-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<16 x i8>, <16 x i1>} @llvm.usub.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1) |
| %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0 |
| %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1 |
| %res = sext <16 x i1> %obit to <16 x i32> |
| store <16 x i8> %val, <16 x i8>* %p2 |
| ret <16 x i32> %res |
| } |
| |
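| ; Word elements: SSE2/SSSE3 flip the sign bits and use signed pcmpgtw, SSE41/AVX use pminuw/pcmpeqw, and AVX512 uses |
| ; vpcmpnleuw; the <8 x i1> mask is then sign-extended to <8 x i32>. |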
| define <8 x i32> @usubo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v8i16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: pxor %xmm2, %xmm3 |
| ; SSE2-NEXT: psubw %xmm1, %xmm0 |
| ; SSE2-NEXT: pxor %xmm0, %xmm2 |
| ; SSE2-NEXT: pcmpgtw %xmm3, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] |
| ; SSE2-NEXT: pslld $31, %xmm1 |
| ; SSE2-NEXT: psrad $31, %xmm1 |
| ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] |
| ; SSE2-NEXT: pslld $31, %xmm2 |
| ; SSE2-NEXT: psrad $31, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v8i16: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768] |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm3 |
| ; SSSE3-NEXT: pxor %xmm2, %xmm3 |
| ; SSSE3-NEXT: psubw %xmm1, %xmm0 |
| ; SSSE3-NEXT: pxor %xmm0, %xmm2 |
| ; SSSE3-NEXT: pcmpgtw %xmm3, %xmm2 |
| ; SSSE3-NEXT: movdqa %xmm2, %xmm1 |
| ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] |
| ; SSSE3-NEXT: pslld $31, %xmm1 |
| ; SSSE3-NEXT: psrad $31, %xmm1 |
| ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] |
| ; SSSE3-NEXT: pslld $31, %xmm2 |
| ; SSSE3-NEXT: psrad $31, %xmm2 |
| ; SSSE3-NEXT: movdqa %xmm0, (%rdi) |
| ; SSSE3-NEXT: movdqa %xmm1, %xmm0 |
| ; SSSE3-NEXT: movdqa %xmm2, %xmm1 |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v8i16: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE41-NEXT: psubw %xmm1, %xmm2 |
| ; SSE41-NEXT: pminuw %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqw %xmm2, %xmm0 |
| ; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm0, %xmm1 |
| ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero |
| ; SSE41-NEXT: pslld $31, %xmm0 |
| ; SSE41-NEXT: psrad $31, %xmm0 |
| ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] |
| ; SSE41-NEXT: pslld $31, %xmm1 |
| ; SSE41-NEXT: psrad $31, %xmm1 |
| ; SSE41-NEXT: movdqa %xmm2, (%rdi) |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v8i16: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpminuw %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqw %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 |
| ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] |
| ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 |
| ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 |
| ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v8i16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpsubw %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpminuw %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqw %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v8i16: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpcmpnleuw %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 |
| ; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} |
| ; AVX512-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<8 x i16>, <8 x i1>} @llvm.usub.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1) |
| %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0 |
| %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1 |
| %res = sext <8 x i1> %obit to <8 x i32> |
| store <8 x i16> %val, <8 x i16>* %p2 |
| ret <8 x i32> %res |
| } |
| |
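| ; There is no unsigned 64-bit vector compare before AVX512, so SSE and AVX1/AVX2 flip the sign bits and use a signed |
| ; greater-than (SSE emulating the 64-bit compare with pcmpgtd/pcmpeqd shuffles); AVX512 uses vpcmpnleuq into k1. |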
| define <2 x i32> @usubo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind { |
| ; SSE-LABEL: usubo_v2i64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] |
| ; SSE-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE-NEXT: pxor %xmm2, %xmm3 |
| ; SSE-NEXT: psubq %xmm1, %xmm0 |
| ; SSE-NEXT: pxor %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pcmpgtd %xmm3, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2] |
| ; SSE-NEXT: pcmpeqd %xmm3, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] |
| ; SSE-NEXT: pand %xmm4, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa %xmm0, (%rdi) |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v2i64: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3 |
| ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm0 |
| ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v2i64: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808] |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3 |
| ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v2i64: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpcmpnleuq %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vmovdqa %xmm1, (%rdi) |
| ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} |
| ; AVX512-NEXT: retq |
| %t = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1) |
| %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0 |
| %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1 |
| %res = sext <2 x i1> %obit to <2 x i32> |
| store <2 x i64> %val, <2 x i64>* %p2 |
| ret <2 x i32> %res |
| } |
| |
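| ; i24 elements are promoted to i32: both operands are masked to 24 bits, subtracted with psubd, and a borrow is |
| ; flagged when the difference has bits set above bit 23; the <4 x i24> store is scalarized into a 16-bit plus an |
| ; 8-bit piece per element. |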
| define <4 x i32> @usubo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v4i24: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: psubd %xmm1, %xmm2 |
| ; SSE2-NEXT: pand %xmm2, %xmm3 |
| ; SSE2-NEXT: pcmpeqd %xmm2, %xmm3 |
| ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 |
| ; SSE2-NEXT: pxor %xmm3, %xmm0 |
| ; SSE2-NEXT: movd %xmm2, %eax |
| ; SSE2-NEXT: movw %ax, (%rdi) |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[3,1,2,3] |
| ; SSE2-NEXT: movd %xmm1, %ecx |
| ; SSE2-NEXT: movw %cx, 9(%rdi) |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] |
| ; SSE2-NEXT: movd %xmm1, %edx |
| ; SSE2-NEXT: movw %dx, 6(%rdi) |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,2,3] |
| ; SSE2-NEXT: movd %xmm1, %esi |
| ; SSE2-NEXT: movw %si, 3(%rdi) |
| ; SSE2-NEXT: shrl $16, %eax |
| ; SSE2-NEXT: movb %al, 2(%rdi) |
| ; SSE2-NEXT: shrl $16, %ecx |
| ; SSE2-NEXT: movb %cl, 11(%rdi) |
| ; SSE2-NEXT: shrl $16, %edx |
| ; SSE2-NEXT: movb %dl, 8(%rdi) |
| ; SSE2-NEXT: shrl $16, %esi |
| ; SSE2-NEXT: movb %sil, 5(%rdi) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v4i24: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movdqa %xmm0, %xmm2 |
| ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] |
| ; SSSE3-NEXT: pand %xmm3, %xmm1 |
| ; SSSE3-NEXT: pand %xmm3, %xmm2 |
| ; SSSE3-NEXT: psubd %xmm1, %xmm2 |
| ; SSSE3-NEXT: pand %xmm2, %xmm3 |
| ; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3 |
| ; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 |
| ; SSSE3-NEXT: pxor %xmm3, %xmm0 |
| ; SSSE3-NEXT: movd %xmm2, %eax |
| ; SSSE3-NEXT: movw %ax, (%rdi) |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[3,1,2,3] |
| ; SSSE3-NEXT: movd %xmm1, %ecx |
| ; SSSE3-NEXT: movw %cx, 9(%rdi) |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] |
| ; SSSE3-NEXT: movd %xmm1, %edx |
| ; SSSE3-NEXT: movw %dx, 6(%rdi) |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,2,3] |
| ; SSSE3-NEXT: movd %xmm1, %esi |
| ; SSSE3-NEXT: movw %si, 3(%rdi) |
| ; SSSE3-NEXT: shrl $16, %eax |
| ; SSSE3-NEXT: movb %al, 2(%rdi) |
| ; SSSE3-NEXT: shrl $16, %ecx |
| ; SSSE3-NEXT: movb %cl, 11(%rdi) |
| ; SSSE3-NEXT: shrl $16, %edx |
| ; SSSE3-NEXT: movb %dl, 8(%rdi) |
| ; SSSE3-NEXT: shrl $16, %esi |
| ; SSSE3-NEXT: movb %sil, 5(%rdi) |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v4i24: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] |
| ; SSE41-NEXT: pand %xmm2, %xmm1 |
| ; SSE41-NEXT: pand %xmm2, %xmm0 |
| ; SSE41-NEXT: psubd %xmm1, %xmm0 |
| ; SSE41-NEXT: pand %xmm0, %xmm2 |
| ; SSE41-NEXT: pcmpeqd %xmm0, %xmm2 |
| ; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE41-NEXT: pxor %xmm2, %xmm1 |
| ; SSE41-NEXT: pextrd $3, %xmm0, %eax |
| ; SSE41-NEXT: movw %ax, 9(%rdi) |
| ; SSE41-NEXT: pextrd $2, %xmm0, %ecx |
| ; SSE41-NEXT: movw %cx, 6(%rdi) |
| ; SSE41-NEXT: pextrd $1, %xmm0, %edx |
| ; SSE41-NEXT: movw %dx, 3(%rdi) |
| ; SSE41-NEXT: movd %xmm0, %esi |
| ; SSE41-NEXT: movw %si, (%rdi) |
| ; SSE41-NEXT: shrl $16, %eax |
| ; SSE41-NEXT: movb %al, 11(%rdi) |
| ; SSE41-NEXT: shrl $16, %ecx |
| ; SSE41-NEXT: movb %cl, 8(%rdi) |
| ; SSE41-NEXT: shrl $16, %edx |
| ; SSE41-NEXT: movb %dl, 5(%rdi) |
| ; SSE41-NEXT: shrl $16, %esi |
| ; SSE41-NEXT: movb %sil, 2(%rdi) |
| ; SSE41-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v4i24: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38] |
| ; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1 |
| ; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpextrd $3, %xmm1, %eax |
| ; AVX1-NEXT: movw %ax, 9(%rdi) |
| ; AVX1-NEXT: vpextrd $2, %xmm1, %ecx |
| ; AVX1-NEXT: movw %cx, 6(%rdi) |
| ; AVX1-NEXT: vpextrd $1, %xmm1, %edx |
| ; AVX1-NEXT: movw %dx, 3(%rdi) |
| ; AVX1-NEXT: vmovd %xmm1, %esi |
| ; AVX1-NEXT: movw %si, (%rdi) |
| ; AVX1-NEXT: shrl $16, %eax |
| ; AVX1-NEXT: movb %al, 11(%rdi) |
| ; AVX1-NEXT: shrl $16, %ecx |
| ; AVX1-NEXT: movb %cl, 8(%rdi) |
| ; AVX1-NEXT: shrl $16, %edx |
| ; AVX1-NEXT: movb %dl, 5(%rdi) |
| ; AVX1-NEXT: shrl $16, %esi |
| ; AVX1-NEXT: movb %sil, 2(%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v4i24: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215] |
| ; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $3, %xmm1, %eax |
| ; AVX2-NEXT: movw %ax, 9(%rdi) |
| ; AVX2-NEXT: vpextrd $2, %xmm1, %ecx |
| ; AVX2-NEXT: movw %cx, 6(%rdi) |
| ; AVX2-NEXT: vpextrd $1, %xmm1, %edx |
| ; AVX2-NEXT: movw %dx, 3(%rdi) |
| ; AVX2-NEXT: vmovd %xmm1, %esi |
| ; AVX2-NEXT: movw %si, (%rdi) |
| ; AVX2-NEXT: shrl $16, %eax |
| ; AVX2-NEXT: movb %al, 11(%rdi) |
| ; AVX2-NEXT: shrl $16, %ecx |
| ; AVX2-NEXT: movb %cl, 8(%rdi) |
| ; AVX2-NEXT: shrl $16, %edx |
| ; AVX2-NEXT: movb %dl, 5(%rdi) |
| ; AVX2-NEXT: shrl $16, %esi |
| ; AVX2-NEXT: movb %sil, 2(%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v4i24: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215] |
| ; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1 |
| ; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm0 |
| ; AVX512-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vpextrd $3, %xmm1, %eax |
| ; AVX512-NEXT: movw %ax, 9(%rdi) |
| ; AVX512-NEXT: vpextrd $2, %xmm1, %ecx |
| ; AVX512-NEXT: movw %cx, 6(%rdi) |
| ; AVX512-NEXT: vpextrd $1, %xmm1, %edx |
| ; AVX512-NEXT: movw %dx, 3(%rdi) |
| ; AVX512-NEXT: vmovd %xmm1, %esi |
| ; AVX512-NEXT: movw %si, (%rdi) |
| ; AVX512-NEXT: shrl $16, %eax |
| ; AVX512-NEXT: movb %al, 11(%rdi) |
| ; AVX512-NEXT: shrl $16, %ecx |
| ; AVX512-NEXT: movb %cl, 8(%rdi) |
| ; AVX512-NEXT: shrl $16, %edx |
| ; AVX512-NEXT: movb %dl, 5(%rdi) |
| ; AVX512-NEXT: shrl $16, %esi |
| ; AVX512-NEXT: movb %sil, 2(%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<4 x i24>, <4 x i1>} @llvm.usub.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1) |
| %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0 |
| %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1 |
| %res = sext <4 x i1> %obit to <4 x i32> |
| store <4 x i24> %val, <4 x i24>* %p2 |
| ret <4 x i32> %res |
| } |
| |
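| ; i1 elements: the generic paths mask the operands to bit 0, subtract in 32-bit lanes, and flag a borrow when the |
| ; difference has bits set above bit 0; the <4 x i1> value is stored via pslld/movmskps. AVX512 subtracts with kxorw |
| ; in mask registers and derives the borrow with a masked vptestnmd. |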
| define <4 x i32> @usubo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind { |
| ; SSE-LABEL: usubo_v4i1: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] |
| ; SSE-NEXT: pand %xmm2, %xmm1 |
| ; SSE-NEXT: pand %xmm2, %xmm0 |
| ; SSE-NEXT: psubd %xmm1, %xmm0 |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pcmpeqd %xmm0, %xmm2 |
| ; SSE-NEXT: pcmpeqd %xmm1, %xmm1 |
| ; SSE-NEXT: pxor %xmm2, %xmm1 |
| ; SSE-NEXT: pslld $31, %xmm0 |
| ; SSE-NEXT: movmskps %xmm0, %eax |
| ; SSE-NEXT: movb %al, (%rdi) |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v4i1: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1] |
| ; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1 |
| ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; AVX1-NEXT: vmovmskps %xmm1, %eax |
| ; AVX1-NEXT: movb %al, (%rdi) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v4i1: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1] |
| ; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; AVX2-NEXT: vmovmskps %xmm1, %eax |
| ; AVX2-NEXT: movb %al, (%rdi) |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v4i1: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpslld $31, %xmm0, %xmm0 |
| ; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0 |
| ; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 |
| ; AVX512-NEXT: kxorw %k1, %k0, %k1 |
| ; AVX512-NEXT: vptestnmd %xmm0, %xmm0, %k2 {%k1} |
| ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k2} {z} |
| ; AVX512-NEXT: kmovd %k1, %eax |
| ; AVX512-NEXT: movb %al, (%rdi) |
| ; AVX512-NEXT: retq |
| %t = call {<4 x i1>, <4 x i1>} @llvm.usub.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1) |
| %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0 |
| %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1 |
| %res = sext <4 x i1> %obit to <4 x i32> |
| store <4 x i1> %val, <4 x i1>* %p2 |
| ret <4 x i32> %res |
| } |
| |
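| ; i128 elements are fully scalarized: each lane is a sub/sbb pair with setb capturing the carry-out, and the two |
| ; carry bits are re-inserted into a vector and sign-extended to <2 x i32> (AVX512 goes through a mask register). |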
| define <2 x i32> @usubo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind { |
| ; SSE2-LABEL: usubo_v2i128: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSE2-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; SSE2-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: setb %al |
| ; SSE2-NEXT: movzbl %al, %r11d |
| ; SSE2-NEXT: subq %r8, %rdi |
| ; SSE2-NEXT: sbbq %r9, %rsi |
| ; SSE2-NEXT: setb %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: pinsrw $4, %r11d, %xmm0 |
| ; SSE2-NEXT: movq %rdx, 16(%r10) |
| ; SSE2-NEXT: movq %rdi, (%r10) |
| ; SSE2-NEXT: movq %rcx, 24(%r10) |
| ; SSE2-NEXT: movq %rsi, 8(%r10) |
| ; SSE2-NEXT: psllq $63, %xmm0 |
| ; SSE2-NEXT: psrad $31, %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSE2-NEXT: retq |
| ; |
| ; SSSE3-LABEL: usubo_v2i128: |
| ; SSSE3: # %bb.0: |
| ; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSSE3-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; SSSE3-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; SSSE3-NEXT: setb %al |
| ; SSSE3-NEXT: movzbl %al, %r11d |
| ; SSSE3-NEXT: subq %r8, %rdi |
| ; SSSE3-NEXT: sbbq %r9, %rsi |
| ; SSSE3-NEXT: setb %al |
| ; SSSE3-NEXT: movzbl %al, %eax |
| ; SSSE3-NEXT: movd %eax, %xmm0 |
| ; SSSE3-NEXT: pinsrw $4, %r11d, %xmm0 |
| ; SSSE3-NEXT: movq %rdx, 16(%r10) |
| ; SSSE3-NEXT: movq %rdi, (%r10) |
| ; SSSE3-NEXT: movq %rcx, 24(%r10) |
| ; SSSE3-NEXT: movq %rsi, 8(%r10) |
| ; SSSE3-NEXT: psllq $63, %xmm0 |
| ; SSSE3-NEXT: psrad $31, %xmm0 |
| ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSSE3-NEXT: retq |
| ; |
| ; SSE41-LABEL: usubo_v2i128: |
| ; SSE41: # %bb.0: |
| ; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSE41-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; SSE41-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; SSE41-NEXT: setb %al |
| ; SSE41-NEXT: movzbl %al, %r11d |
| ; SSE41-NEXT: subq %r8, %rdi |
| ; SSE41-NEXT: sbbq %r9, %rsi |
| ; SSE41-NEXT: setb %al |
| ; SSE41-NEXT: movzbl %al, %eax |
| ; SSE41-NEXT: movd %eax, %xmm0 |
| ; SSE41-NEXT: pinsrb $8, %r11d, %xmm0 |
| ; SSE41-NEXT: movq %rdx, 16(%r10) |
| ; SSE41-NEXT: movq %rdi, (%r10) |
| ; SSE41-NEXT: movq %rcx, 24(%r10) |
| ; SSE41-NEXT: movq %rsi, 8(%r10) |
| ; SSE41-NEXT: psllq $63, %xmm0 |
| ; SSE41-NEXT: psrad $31, %xmm0 |
| ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] |
| ; SSE41-NEXT: retq |
| ; |
| ; AVX1-LABEL: usubo_v2i128: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX1-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; AVX1-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; AVX1-NEXT: setb %al |
| ; AVX1-NEXT: movzbl %al, %r11d |
| ; AVX1-NEXT: subq %r8, %rdi |
| ; AVX1-NEXT: sbbq %r9, %rsi |
| ; AVX1-NEXT: setb %al |
| ; AVX1-NEXT: movzbl %al, %eax |
| ; AVX1-NEXT: vmovd %eax, %xmm0 |
| ; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 |
| ; AVX1-NEXT: movq %rdx, 16(%r10) |
| ; AVX1-NEXT: movq %rdi, (%r10) |
| ; AVX1-NEXT: movq %rcx, 24(%r10) |
| ; AVX1-NEXT: movq %rsi, 8(%r10) |
| ; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 |
| ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX2-LABEL: usubo_v2i128: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; AVX2-NEXT: setb %al |
| ; AVX2-NEXT: movzbl %al, %r11d |
| ; AVX2-NEXT: subq %r8, %rdi |
| ; AVX2-NEXT: sbbq %r9, %rsi |
| ; AVX2-NEXT: setb %al |
| ; AVX2-NEXT: movzbl %al, %eax |
| ; AVX2-NEXT: vmovd %eax, %xmm0 |
| ; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 |
| ; AVX2-NEXT: movq %rdx, 16(%r10) |
| ; AVX2-NEXT: movq %rdi, (%r10) |
| ; AVX2-NEXT: movq %rcx, 24(%r10) |
| ; AVX2-NEXT: movq %rsi, 8(%r10) |
| ; AVX2-NEXT: vpsllq $63, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: usubo_v2i128: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: subq {{[0-9]+}}(%rsp), %rdx |
| ; AVX512-NEXT: sbbq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: setb %al |
| ; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) |
| ; AVX512-NEXT: subq %r8, %rdi |
| ; AVX512-NEXT: sbbq %r9, %rsi |
| ; AVX512-NEXT: setb %al |
| ; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) |
| ; AVX512-NEXT: kmovw -{{[0-9]+}}(%rsp), %k1 |
| ; AVX512-NEXT: movq %rdx, 16(%r10) |
| ; AVX512-NEXT: movq %rdi, (%r10) |
| ; AVX512-NEXT: movq %rcx, 24(%r10) |
| ; AVX512-NEXT: movq %rsi, 8(%r10) |
| ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 |
| ; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} |
| ; AVX512-NEXT: retq |
| %t = call {<2 x i128>, <2 x i1>} @llvm.usub.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1) |
| %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0 |
| %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1 |
| %res = sext <2 x i1> %obit to <2 x i32> |
| store <2 x i128> %val, <2 x i128>* %p2 |
| ret <2 x i32> %res |
| } |