; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512VL-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512BW
; These test cases are inspired by C++20 std::midpoint().
; See https://bugs.llvm.org/show_bug.cgi?id=40965
; Using 512-bit vector regs.
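;
; A rough scalar sketch of the pattern each test below encodes (an
; approximation for readability, not the exact std::midpoint() definition):
;   sign   = (a1 > a2) ? -1 : 1              ; %t3 / %t4
;   delta  = max(a1, a2) - min(a1, a2)       ; via the %t5/%t6 selects and %t7
;   half   = delta >> 1 (logical)            ; %t16 / %t8
;   result = a1 + half * sign                ; %t9, %a10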
; ---------------------------------------------------------------------------- ;
; 32-bit width. 512 / 32 = 16 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
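; Note: in these i32 tests the half-difference is multiplied by itself
; (%t9 = mul %t16, %t16), so the +-1 select (%t4) is dead and never shows up
; in the expected code (hence the plain vpmulld %zmm1, %zmm1 in the checks).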
define <16 x i32> @vec512_i32_signed_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_signed_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrld $1, %zmm1, %zmm1
; ALL-NEXT: vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
%t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
%t7 = sub <16 x i32> %t6, %t5
%t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <16 x i32> %t16, %t16 ; signed
%a10 = add nsw <16 x i32> %t9, %a1 ; signed
ret <16 x i32> %a10
}
define <16 x i32> @vec512_i32_unsigned_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_unsigned_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpminud %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxud %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrld $1, %zmm1, %zmm1
; ALL-NEXT: vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%t3 = icmp ugt <16 x i32> %a1, %a2
%t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
%t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
%t7 = sub <16 x i32> %t6, %t5
%t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t9 = mul <16 x i32> %t16, %t16
%a10 = add <16 x i32> %t9, %a1
ret <16 x i32> %a10
}
; Values are loaded. Only check signed case.
define <16 x i32> @vec512_i32_signed_mem_reg(<16 x i32>* %a1_addr, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_signed_mem_reg:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpminsd %zmm0, %zmm1, %zmm2
; ALL-NEXT: vpmaxsd %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpsubd %zmm2, %zmm0, %zmm0
; ALL-NEXT: vpsrld $1, %zmm0, %zmm0
; ALL-NEXT: vpmulld %zmm0, %zmm0, %zmm0
; ALL-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%a1 = load <16 x i32>, <16 x i32>* %a1_addr
%t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
%t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
%t7 = sub <16 x i32> %t6, %t5
%t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <16 x i32> %t16, %t16 ; signed
%a10 = add nsw <16 x i32> %t9, %a1 ; signed
ret <16 x i32> %a10
}
define <16 x i32> @vec512_i32_signed_reg_mem(<16 x i32> %a1, <16 x i32>* %a2_addr) nounwind {
; ALL-LABEL: vec512_i32_signed_reg_mem:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrld $1, %zmm1, %zmm1
; ALL-NEXT: vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%a2 = load <16 x i32>, <16 x i32>* %a2_addr
%t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
%t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
%t7 = sub <16 x i32> %t6, %t5
%t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <16 x i32> %t16, %t16 ; signed
%a10 = add nsw <16 x i32> %t9, %a1 ; signed
ret <16 x i32> %a10
}
define <16 x i32> @vec512_i32_signed_mem_mem(<16 x i32>* %a1_addr, <16 x i32>* %a2_addr) nounwind {
; ALL-LABEL: vec512_i32_signed_mem_mem:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm0
; ALL-NEXT: vmovdqa64 (%rsi), %zmm1
; ALL-NEXT: vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrld $1, %zmm1, %zmm1
; ALL-NEXT: vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%a1 = load <16 x i32>, <16 x i32>* %a1_addr
%a2 = load <16 x i32>, <16 x i32>* %a2_addr
%t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
%t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
%t7 = sub <16 x i32> %t6, %t5
%t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%t9 = mul nsw <16 x i32> %t16, %t16 ; signed
%a10 = add nsw <16 x i32> %t9, %a1 ; signed
ret <16 x i32> %a10
}
; ---------------------------------------------------------------------------- ;
; 64-bit width. 512 / 64 = 8 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
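; None of the RUN lines enable AVX512DQ, so there is no vpmullq; as the checks
; below show, the 64-bit multiply is instead built from 32-bit vpmuludq
; partial products (lo*lo plus the two cross terms shifted left by 32).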
define <8 x i64> @vec512_i64_signed_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_signed_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
%t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
%t7 = sub <8 x i64> %t6, %t5
%t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t9 = mul nsw <8 x i64> %t8, %t4 ; signed
%a10 = add nsw <8 x i64> %t9, %a1 ; signed
ret <8 x i64> %a10
}
define <8 x i64> @vec512_i64_unsigned_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_unsigned_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%t3 = icmp ugt <8 x i64> %a1, %a2
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
%t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
%t7 = sub <8 x i64> %t6, %t5
%t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t9 = mul <8 x i64> %t8, %t4
%a10 = add <8 x i64> %t9, %a1
ret <8 x i64> %a10
}
; Values are loaded. Only check signed case.
define <8 x i64> @vec512_i64_signed_mem_reg(<8 x i64>* %a1_addr, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_signed_mem_reg:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; ALL-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpsubq %zmm2, %zmm0, %zmm0
; ALL-NEXT: vpsrlq $1, %zmm0, %zmm2
; ALL-NEXT: vpsrlq $33, %zmm0, %zmm0
; ALL-NEXT: vpmuludq %zmm3, %zmm0, %zmm0
; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
; ALL-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; ALL-NEXT: vpsllq $32, %zmm0, %zmm0
; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
; ALL-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a1 = load <8 x i64>, <8 x i64>* %a1_addr
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
%t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
%t7 = sub <8 x i64> %t6, %t5
%t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t9 = mul nsw <8 x i64> %t8, %t4 ; signed
%a10 = add nsw <8 x i64> %t9, %a1 ; signed
ret <8 x i64> %a10
}
define <8 x i64> @vec512_i64_signed_reg_mem(<8 x i64> %a1, <8 x i64>* %a2_addr) nounwind {
; ALL-LABEL: vec512_i64_signed_reg_mem:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a2 = load <8 x i64>, <8 x i64>* %a2_addr
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
%t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
%t7 = sub <8 x i64> %t6, %t5
%t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t9 = mul nsw <8 x i64> %t8, %t4 ; signed
%a10 = add nsw <8 x i64> %t9, %a1 ; signed
ret <8 x i64> %a10
}
define <8 x i64> @vec512_i64_signed_mem_mem(<8 x i64>* %a1_addr, <8 x i64>* %a2_addr) nounwind {
; ALL-LABEL: vec512_i64_signed_mem_mem:
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm0
; ALL-NEXT: vmovdqa64 (%rsi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a1 = load <8 x i64>, <8 x i64>* %a1_addr
%a2 = load <8 x i64>, <8 x i64>* %a2_addr
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
%t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
%t7 = sub <8 x i64> %t6, %t5
%t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%t9 = mul nsw <8 x i64> %t8, %t4 ; signed
%a10 = add nsw <8 x i64> %t9, %a1 ; signed
ret <8 x i64> %a10
}
; ---------------------------------------------------------------------------- ;
; 16-bit width. 512 / 16 = 32 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
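; Word-element min/max/compare/multiply on 512-bit vectors require AVX512BW,
; so the AVX512F and AVX512VL-FALLBACK runs split the zmm inputs into two ymm
; halves and recombine with vinserti64x4, while the AVX512BW runs stay on zmm
; and use a mask register for the compare.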
define <32 x i16> @vec512_i16_signed_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm6
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm7
; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsubw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubw %ymm7, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
%t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
%t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
%t7 = sub <32 x i16> %t6, %t5
%t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <32 x i16> %t16, %t4 ; signed
%a10 = add nsw <32 x i16> %t9, %a1 ; signed
ret <32 x i16> %a10
}
define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpminuw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm5
; AVX512F-NEXT: vpternlogq $15, %zmm5, %zmm5, %zmm5
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm6
; AVX512F-NEXT: vpcmpeqw %ymm6, %ymm0, %ymm7
; AVX512F-NEXT: vpternlogq $15, %zmm7, %zmm7, %zmm7
; AVX512F-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsubw %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm4, %ymm7, %ymm6
; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vpminuw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminuw %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm6, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm4, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm7, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm4, %ymm5
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpnleuw %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp ugt <32 x i16> %a1, %a2
%t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
%t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
%t7 = sub <32 x i16> %t6, %t5
%t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul <32 x i16> %t16, %t4
%a10 = add <32 x i16> %t9, %a1
ret <32 x i16> %a10
}
; Values are loaded. Only check signed case.
define <32 x i16> @vec512_i16_signed_mem_reg(<32 x i16>* %a1_addr, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm6
; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm7
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vpsubw %ymm7, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpsubw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <32 x i16>, <32 x i16>* %a1_addr
%t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
%t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
%t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
%t7 = sub <32 x i16> %t6, %t5
%t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <32 x i16> %t16, %t4 ; signed
%a10 = add nsw <32 x i16> %t9, %a1 ; signed
ret <32 x i16> %a10
}
define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, <32 x i16>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm6
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm7
; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsubw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubw %ymm7, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a2 = load <32 x i16>, <32 x i16>* %a2_addr
%t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
%t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
%t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
%t7 = sub <32 x i16> %t6, %t5
%t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <32 x i16> %t16, %t4 ; signed
%a10 = add nsw <32 x i16> %t9, %a1 ; signed
ret <32 x i16> %a10
}
define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm6
; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm7
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vpsubw %ymm7, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <32 x i16>, <32 x i16>* %a1_addr
%a2 = load <32 x i16>, <32 x i16>* %a2_addr
%t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
%t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
%t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
%t7 = sub <32 x i16> %t6, %t5
%t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%t9 = mul nsw <32 x i16> %t16, %t4 ; signed
%a10 = add nsw <32 x i16> %t9, %a1 ; signed
ret <32 x i16> %a10
}
; ---------------------------------------------------------------------------- ;
; 8-bit width. 512 / 8 = 64 elts.
; ---------------------------------------------------------------------------- ;
; Values come from regs
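; Same 256-bit split as the i16 tests for the non-BW runs. In addition, x86
; has no byte shift or byte multiply, so the checks show the lshr-by-1 done as
; a word shift masked with 127 and the byte multiply expanded into
; punpck[lh]bw + vpmullw + vpackuswb.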
define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vpminsb %ymm3, %ymm2, %ymm6
; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm7
; AVX512F-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubb %ymm7, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm1, %ymm8, %ymm1
; AVX512F-NEXT: vpackuswb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm2, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpand %ymm1, %ymm8, %ymm1
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
%t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
%t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
%t7 = sub <64 x i8> %t6, %t5
%t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul nsw <64 x i8> %t8, %t4 ; signed
%a10 = add nsw <64 x i8> %t9, %a1 ; signed
ret <64 x i8> %a10
}
define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpminub %ymm4, %ymm2, %ymm5
; AVX512F-NEXT: vpcmpeqb %ymm5, %ymm2, %ymm3
; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm6
; AVX512F-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm7
; AVX512F-NEXT: vpternlogq $15, %zmm7, %zmm7, %zmm7
; AVX512F-NEXT: vpmaxub %ymm4, %ymm2, %ymm4
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsubb %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm6, %ymm7, %ymm7
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm7, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm1, %ymm8, %ymm1
; AVX512F-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpor %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-FALLBACK-NEXT: vpminub %ymm4, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm5, %ymm2, %ymm3
; AVX512VL-FALLBACK-NEXT: vpminub %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm4, %ymm2, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm6, %ymm7
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm9, %ymm5
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm9, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm7, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpand %ymm1, %ymm9, %ymm1
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm6, %ymm3
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm9, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm3, %ymm4, %ymm3
; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm9, %ymm3
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp ugt <64 x i8> %a1, %a2
%t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
%t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
%t7 = sub <64 x i8> %t6, %t5
%t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul <64 x i8> %t8, %t4
%a10 = add <64 x i8> %t9, %a1
ret <64 x i8> %a10
}
; Values are loaded. Only check signed case.
define <64 x i8> @vec512_i8_signed_mem_reg(<64 x i8>* %a1_addr, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
; AVX512F-NEXT: vpminsb %ymm3, %ymm2, %ymm6
; AVX512F-NEXT: vpminsb %ymm0, %ymm1, %ymm7
; AVX512F-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm6, %ymm0, %ymm0
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm0, %ymm8, %ymm0
; AVX512F-NEXT: vpackuswb %ymm6, %ymm0, %ymm0
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm2, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm1, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpand %ymm0, %ymm8, %ymm0
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm4, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <64 x i8>, <64 x i8>* %a1_addr
%t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
%t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
%t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
%t7 = sub <64 x i8> %t6, %t5
%t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul nsw <64 x i8> %t8, %t4 ; signed
%a10 = add nsw <64 x i8> %t9, %a1 ; signed
ret <64 x i8> %a10
}
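; One value comes from a reg, the other is loaded. Only check signed case.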
define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, <64 x i8>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm6
; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm7
; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpand %ymm2, %ymm8, %ymm2
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_mem:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a2 = load <64 x i8>, <64 x i8>* %a2_addr
%t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
%t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
%t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
%t7 = sub <64 x i8> %t6, %t5
%t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul nsw <64 x i8> %t8, %t4 ; signed
%a10 = add nsw <64 x i8> %t9, %a1 ; signed
ret <64 x i8> %a10
}
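; Both values are loaded. Only check signed case.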
define <64 x i8> @vec512_i8_signed_mem_mem(<64 x i8>* %a1_addr, <64 x i8>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rsi), %ymm2
; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm3
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm6
; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm7
; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm2
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm6
; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpand %ymm2, %ymm8, %ymm2
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_mem:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <64 x i8>, <64 x i8>* %a1_addr
%a2 = load <64 x i8>, <64 x i8>* %a2_addr
%t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
%t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
%t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
%t7 = sub <64 x i8> %t6, %t5
%t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%t9 = mul nsw <64 x i8> %t8, %t4 ; signed
%a10 = add nsw <64 x i8> %t9, %a1 ; signed
ret <64 x i8> %a10
}