; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1,AVX1-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=ALL,XOP,XOP-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX1,XOPAVX,XOPAVX1,XOPAVX1-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX2,XOPAVX,XOPAVX2,XOPAVX2-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512VL-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW,AVX512BW-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512BW,AVX512VLBW

; These test cases are inspired by C++2a std::midpoint().
; See https://bugs.llvm.org/show_bug.cgi?id=40965
; Using 128-bit vector regs.
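
; Each function below encodes the same lane-wise, overflow-safe recipe (a
; scalar sketch of what the IR's icmp/select/sub/lshr/mul/add chain computes):
;   sign = (a1 > a2) ? -1 : 1
;   mid  = a1 + sign * ((max(a1, a2) - min(a1, a2)) >> 1)
; Note: pre-SSE4.1 targets have no pmulld, so the 32-bit multiply is lowered
; to two pmuludq ops plus pshufd/punpckldq shuffles, as seen in the SSE2 blocks.
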
; ---------------------------------------------------------------------------- ;
; 32-bit width. 128 / 32 = 4 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm1, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm1, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm3, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm2
; SSE41-NEXT: paddd %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
%t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
%t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
%t7 = sub <4 x i32> %t6, %t5
%t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <4 x i32> %t8, %t4 ; signed
%a10 = add nsw <4 x i32> %t9, %a1 ; signed
ret <4 x i32> %a10
}
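
; Unsigned case: SSE2 has no unsigned dword compare, so the lowering flips the
; sign bit of both operands (pxor with a 0x80000000 splat) and reuses signed
; pcmpgtd; SSE4.1 instead derives the mask from pminud plus a pcmpeqd/pxor
; "not equal to the minimum" test.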
define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1,1,1,1]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: psubd %xmm3, %xmm4
; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminud %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{.*}}(%rip), %xmm4
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm2, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm4
; SSE41-NEXT: paddd %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleud %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%t3 = icmp ugt <4 x i32> %a1, %a2
%t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
%t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
%t7 = sub <4 x i32> %t6, %t5
%t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
%t9 = mul <4 x i32> %t8, %t4
%a10 = add <4 x i32> %t9, %a1
ret <4 x i32> %a10
}

; Values are loaded. Only check signed case.
define <4 x i32> @vec128_i32_signed_mem_reg(<4 x i32>* %a1_addr, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX2-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a1 = load <4 x i32>, <4 x i32>* %a1_addr
%t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
%t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
%t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
%t7 = sub <4 x i32> %t6, %t5
%t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <4 x i32> %t8, %t4 ; signed
%a10 = add nsw <4 x i32> %t9, %a1 ; signed
ret <4 x i32> %a10
}

define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, <4 x i32>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pcmpgtd %xmm2, %xmm1
; SSE41-NEXT: por {{.*}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm2, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm2
; SSE41-NEXT: psubd %xmm3, %xmm2
; SSE41-NEXT: psrld $1, %xmm2
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: paddd %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a2 = load <4 x i32>, <4 x i32>* %a2_addr
%t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
%t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
%t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
%t7 = sub <4 x i32> %t6, %t5
%t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <4 x i32> %t8, %t4 ; signed
%a10 = add nsw <4 x i32> %t9, %a1 ; signed
ret <4 x i32> %a10
}

define <4 x i32> @vec128_i32_signed_mem_mem(<4 x i32>* %a1_addr, <4 x i32>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; XOP-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX1-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX2-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a1 = load <4 x i32>, <4 x i32>* %a1_addr
%a2 = load <4 x i32>, <4 x i32>* %a2_addr
%t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
%t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
%t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
%t7 = sub <4 x i32> %t6, %t5
%t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <4 x i32> %t8, %t4 ; signed
%a10 = add nsw <4 x i32> %t9, %a1 ; signed
ret <4 x i32> %a10
}

; ---------------------------------------------------------------------------- ;
; 64-bit width. 128 / 64 = 2 elts.
; ---------------------------------------------------------------------------- ;
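
; i64 lanes: only AVX512 has native 64-bit min/max and compares, so the other
; lowerings synthesize the compare from 32-bit pcmpgtd/pcmpeqd pieces, and the
; 64-bit multiply of the halved difference by the +/-1 vector is built from
; pmuludq on 32-bit halves (a sketch of what the shift/pmuludq sequences do):
;   x * y = lo(x)*lo(y) + ((lo(x)*hi(y) + hi(x)*lo(y)) << 32)   (mod 2^64)
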
; Values come from regs
define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pxor %xmm0, %xmm5
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpgtd %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpeqd %xmm5, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
; SSE41-NEXT: pand %xmm7, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE41-NEXT: por %xmm4, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: psubq %xmm5, %xmm1
; SSE41-NEXT: psrlq $1, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: psrlq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: paddq %xmm0, %xmm4
; SSE41-NEXT: psllq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm1, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm4
; SSE41-NEXT: paddq %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
%t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
%t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
%t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
%t7 = sub <2 x i64> %t6, %t5
%t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
%t9 = mul nsw <2 x i64> %t8, %t4 ; signed
%a10 = add nsw <2 x i64> %t9, %a1 ; signed
ret <2 x i64> %a10
}
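
; Unsigned case: the SSE lowerings bias both 32-bit halves of each lane via
; pxor with 0x8000000080000000 (9223372039002259456) so that the signed
; pcmpgtd pieces implement an unsigned 64-bit compare; XOP uses vpcomgtuq and
; AVX512 uses vpcmpnleuq directly.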
define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pxor %xmm0, %xmm5
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpgtd %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpeqd %xmm5, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
; SSE41-NEXT: pand %xmm7, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE41-NEXT: por %xmm4, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: psubq %xmm5, %xmm1
; SSE41-NEXT: psrlq $1, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: psrlq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: paddq %xmm0, %xmm4
; SSE41-NEXT: psllq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm1, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm4
; SSE41-NEXT: paddq %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm4, %xmm5
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm5, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpmuludq %xmm5, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm5, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm4, %xmm5
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm5, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpmuludq %xmm5, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm5, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_unsigned_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltuq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%t3 = icmp ugt <2 x i64> %a1, %a2
%t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
%t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
%t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
%t7 = sub <2 x i64> %t6, %t5
%t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
%t9 = mul <2 x i64> %t8, %t4
%a10 = add <2 x i64> %t9, %a1
ret <2 x i64> %a10
}

; Values are loaded. Only check signed case.
define <2 x i64> @vec128_i64_signed_mem_reg(<2 x i64>* %a1_addr, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: psllq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT: movdqa %xmm5, %xmm6
; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm2, %xmm4
; SSE41-NEXT: pcmpgtd %xmm5, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm5
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: psubq %xmm5, %xmm1
; SSE41-NEXT: psrlq $1, %xmm1
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm0, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512VL-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a1 = load <2 x i64>, <2 x i64>* %a1_addr
%t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
%t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
%t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
%t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
%t7 = sub <2 x i64> %t6, %t5
%t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
%t9 = mul nsw <2 x i64> %t8, %t4 ; signed
%a10 = add nsw <2 x i64> %t9, %a1 ; signed
ret <2 x i64> %a10
}
define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, <2 x i64>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubq %xmm4, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm6
; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm2, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm5, %xmm3
; SSE41-NEXT: psrlq $1, %xmm3
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm3
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a2 = load <2 x i64>, <2 x i64>* %a2_addr
%t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
%t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
%t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
%t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
%t7 = sub <2 x i64> %t6, %t5
%t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
%t9 = mul nsw <2 x i64> %t8, %t4 ; signed
%a10 = add nsw <2 x i64> %t9, %a1 ; signed
ret <2 x i64> %a10
}
define <2 x i64> @vec128_i64_signed_mem_mem(<2 x i64>* %a1_addr, <2 x i64>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: psllq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa (%rsi), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm5
; SSE41-NEXT: pxor %xmm0, %xmm5
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pcmpgtd %xmm5, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm6
; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm1, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
; SSE41-NEXT: psubq %xmm5, %xmm2
; SSE41-NEXT: psrlq $1, %xmm2
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm2
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
%a1 = load <2 x i64>, <2 x i64>* %a1_addr
%a2 = load <2 x i64>, <2 x i64>* %a2_addr
%t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
%t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
%t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
%t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
%t7 = sub <2 x i64> %t6, %t5
%t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
%t9 = mul nsw <2 x i64> %t8, %t4 ; signed
%a10 = add nsw <2 x i64> %t9, %a1 ; signed
ret <2 x i64> %a10
}
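; Note on the i64 lowerings above: none of the RUN-line targets has a
; packed 64 x 64 -> 64 multiply, so the multiply by the +/-1 vector is
; expanded from 32-bit pmuludq pieces. With x_hi/x_lo denoting the 32-bit
; halves of each lane, the identity being used is
;   x * y = x_lo * y_lo + ((x_lo * y_hi + x_hi * y_lo) << 32)
; the x_hi * y_hi term contributes only at bit 64 and above, so it is
; dropped from the truncated 64-bit product.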
; ---------------------------------------------------------------------------- ;
; 16-bit width. 128 / 16 = 8 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtw %xmm1, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm1, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pmullw %xmm1, %xmm2
; SSE-NEXT: paddw %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
%t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
%t7 = sub <8 x i16> %t6, %t5
%t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <8 x i16> %t8, %t4 ; signed
%a10 = add nsw <8 x i16> %t9, %a1 ; signed
ret <8 x i16> %a10
}
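; Note: the XOP lowering above fuses the trailing pmullw+paddw pair into a
; single vpmacsww multiply-accumulate.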
define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE2-LABEL: vec128_i16_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
; SSE2-NEXT: por {{.*}}(%rip), %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pminsw %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm3, %xmm5
; SSE2-NEXT: pmaxsw %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
; SSE2-NEXT: psubw %xmm5, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pmullw %xmm4, %xmm2
; SSE2-NEXT: paddw %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i16_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminuw %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqw %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{.*}}(%rip), %xmm4
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: psubw %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: paddw %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_unsigned_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminuw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX512F-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm3, %xmm3, %xmm3
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%t3 = icmp ugt <8 x i16> %a1, %a2
%t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
%t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
%t7 = sub <8 x i16> %t6, %t5
%t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul <8 x i16> %t8, %t4
%a10 = add <8 x i16> %t9, %a1
ret <8 x i16> %a10
}
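; Note on the unsigned i16 lowerings above: SSE2 has no unsigned word
; compare or min/max, so both inputs are biased by XOR with 0x8000, which
; maps unsigned order onto signed order:
;   x <u y  <=>  (x ^ 0x8000) <s (y ^ 0x8000)
; after which pcmpgtw/pminsw/pmaxsw apply and the bias is XORed back out.
; SSE4.1 gains pminuw/pmaxuw but still lacks an unsigned compare, so the
; 'ugt' mask is recovered as not(a1 == umin(a1, a2)).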
; Values are loaded. Only check signed case.
define <8 x i16> @vec128_i16_signed_mem_reg(<8 x i16>* %a1_addr, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; XOP-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
; XOP-NEXT: vpmacsww %xmm1, %xmm2, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm0, %xmm1, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
%a1 = load <8 x i16>, <8 x i16>* %a1_addr
%t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
%t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
%t7 = sub <8 x i16> %t6, %t5
%t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <8 x i16> %t8, %t4 ; signed
%a10 = add nsw <8 x i16> %t9, %a1 ; signed
ret <8 x i16> %a10
}
define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, <8 x i16>* %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm1
; SSE-NEXT: por {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm2, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm2
; SSE-NEXT: psubw %xmm3, %xmm2
; SSE-NEXT: psrlw $1, %xmm2
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: paddw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%a2 = load <8 x i16>, <8 x i16>* %a2_addr
%t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
%t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
%t7 = sub <8 x i16> %t6, %t5
%t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <8 x i16> %t8, %t4 ; signed
%a10 = add nsw <8 x i16> %t9, %a1 ; signed
ret <8 x i16> %a10
}
define <8 x i16> @vec128_i16_signed_mem_mem(<8 x i16>* %a1_addr, <8 x i16>* %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa (%rsi), %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%a1 = load <8 x i16>, <8 x i16>* %a1_addr
%a2 = load <8 x i16>, <8 x i16>* %a2_addr
%t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
%t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
%t7 = sub <8 x i16> %t6, %t5
%t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <8 x i16> %t8, %t4 ; signed
%a10 = add nsw <8 x i16> %t9, %a1 ; signed
ret <8 x i16> %a10
}
; ---------------------------------------------------------------------------- ;
; 8-bit width. 128 / 8 = 16 elts.
; ---------------------------------------------------------------------------- ;
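; Note: x86 has no packed i8 multiply, so the i8 tests below widen each
; half of the vector to i16 lanes (punpckhbw/pmovzxbw), multiply with
; pmullw, mask the products back to 8 bits, and repack with packuswb.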
; Values come from regs
define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
; SSE2-LABEL: vec128_i8_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubb %xmm4, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm4, %xmm2
; SSE2-NEXT: paddb %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtb %xmm1, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsb %xmm1, %xmm3
; SSE41-NEXT: pmaxsb %xmm0, %xmm1
; SSE41-NEXT: psubb %xmm3, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vzeroupper
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
%t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
%t7 = sub <16 x i8> %t6, %t5
%t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul nsw <16 x i8> %t8, %t4 ; signed
%a10 = add nsw <16 x i8> %t9, %a1 ; signed
ret <16 x i8> %a10
}
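; The signed test above reduces to the same per-lane computation on every
; target: mid = a1 + ((max(a1,a2) - min(a1,a2)) >> 1) * (a1 > a2 ? -1 : 1),
; i.e. std::midpoint() rounding toward the first operand. A commented-out
; scalar sketch of that pattern follows (illustrative only, not part of the
; checked tests; the function name is made up):
;
;   define i8 @scalar_i8_signed_midpoint_sketch(i8 %a1, i8 %a2) nounwind {
;     %cmp = icmp sgt i8 %a1, %a2           ; signed compare, as in %t3
;     %sign = select i1 %cmp, i8 -1, i8 1   ; +/-1 multiplier, as in %t4
;     %min = select i1 %cmp, i8 %a2, i8 %a1
;     %max = select i1 %cmp, i8 %a1, i8 %a2
;     %diff = sub i8 %max, %min             ; always non-negative
;     %half = lshr i8 %diff, 1              ; logical shift: difference / 2
;     %scaled = mul nsw i8 %half, %sign
;     %mid = add nsw i8 %scaled, %a1        ; rounds toward %a1
;     ret i8 %mid
;   }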
define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
; SSE2-LABEL: vec128_i8_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pminub %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpeqb %xmm3, %xmm4
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: por {{.*}}(%rip), %xmm2
; SSE2-NEXT: pmaxub %xmm0, %xmm1
; SSE2-NEXT: psubb %xmm3, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm1, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: packuswb %xmm4, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminub %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqb %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{.*}}(%rip), %xmm4
; SSE41-NEXT: pmaxub %xmm0, %xmm1
; SSE41-NEXT: psubb %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm4
; SSE41-NEXT: pmullw %xmm3, %xmm2
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: packuswb %xmm4, %xmm2
; SSE41-NEXT: paddb %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; AVX1-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vzeroupper
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_unsigned_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_unsigned_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2