| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,FALLBACK0 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42,FALLBACK1 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1-ONLY,FALLBACK2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2,AVX2-SLOW,FALLBACK3 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST-PERLANE,FALLBACK4 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST,FALLBACK5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512F,AVX512F-SLOW,FALLBACK6 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512F,AVX512F-FAST,FALLBACK7 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ,AVX512DQ-SLOW,FALLBACK8 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ,AVX512DQ-FAST,FALLBACK9 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-SLOW,FALLBACK10 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-FAST,FALLBACK11 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-SLOW,FALLBACK12 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-FAST,FALLBACK13 |
| |
| define void @vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,0,2,4,5,6,7] |
| ; SSE2-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec32_i8_widen_to_i16_factor2_broadcast_to_v2i16_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,0,7,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 0, i32 7> |
| %out.bytevec.padded = shufflevector <4 x i8> %broadcast.of.zextinreg, <4 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] |
| ; SSE2-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec64_i8_widen_to_i16_factor2_broadcast_to_v4i16_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,0,11,0,13,0,15,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 0, i32 11, i32 0, i32 13, i32 0, i32 15> |
| %out.bytevec.padded = shufflevector <8 x i8> %broadcast.of.zextinreg, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,65535,0,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm2, %xmm3 |
| ; SSE2-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE2-NEXT: paddb (%rdx), %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec64_i8_widen_to_i32_factor4_broadcast_to_v2i32_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,9,10,11,0,13,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15> |
| %out.bytevec.padded = shufflevector <8 x i8> %broadcast.of.zextinreg, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,0,2,4,5,6,7] |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,14,15,10,11,12,13,14,15] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,14,15,10,11,12,13,14,15] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,14,15,10,11,12,13,14,15] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 0, i32 7> |
| %out.bytevec = bitcast <4 x i16> %broadcast.of.zextinreg to <8 x i8> |
| %out.bytevec.padded = shufflevector <8 x i8> %out.bytevec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: psrlw $8, %xmm1 |
| ; SSE2-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec128_i8_widen_to_i16_factor2_broadcast_to_v8i16_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 0, i32 19, i32 0, i32 21, i32 0, i32 23, i32 0, i32 25, i32 0, i32 27, i32 0, i32 29, i32 0, i32 31> |
| %out.bytevec.padded = shufflevector <16 x i8> %broadcast.of.zextinreg, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec128_i8_widen_to_i32_factor4_broadcast_to_v4i32_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 0, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 0, i32 29, i32 30, i32 31> |
| %out.bytevec.padded = shufflevector <16 x i8> %broadcast.of.zextinreg, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec128_i8_widen_to_i64_factor8_broadcast_to_v2i64_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> |
| %out.bytevec.padded = shufflevector <16 x i8> %broadcast.of.zextinreg, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] |
| ; AVX512F-NEXT: vmovd %xmm0, %eax |
| ; AVX512F-NEXT: vpinsrw $2, %eax, %xmm2, %xmm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7] |
| ; AVX512F-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7] |
| ; AVX512F-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] |
| ; AVX512DQ-NEXT: vmovd %xmm0, %eax |
| ; AVX512DQ-NEXT: vpinsrw $2, %eax, %xmm2, %xmm0 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7] |
| ; AVX512DQ-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7] |
| ; AVX512DQ-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,0,15] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpermw %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,6,7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermw %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-FAST-NEXT: vmovd %xmm0, %eax |
| ; AVX512BW-FAST-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 |
| ; AVX512BW-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; AVX512BW-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 0, i32 11, i32 0, i32 13, i32 0, i32 15> |
| %out.bytevec = bitcast <8 x i16> %broadcast.of.zextinreg to <16 x i8> |
| %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7] |
| ; AVX512F-NEXT: vmovd %xmm0, %eax |
| ; AVX512F-NEXT: vpinsrw $4, %eax, %xmm2, %xmm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7] |
| ; AVX512DQ-NEXT: vmovd %xmm0, %eax |
| ; AVX512DQ-NEXT: vpinsrw $4, %eax, %xmm2, %xmm0 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,13,6,7] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpermw %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,5,6,7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermw %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; AVX512BW-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15> |
| %out.bytevec = bitcast <8 x i16> %broadcast.of.zextinreg to <16 x i8> |
| %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-SLOW-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] |
| ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-SLOW-NEXT: vzeroupper |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-PERLANE-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX2-FAST-PERLANE: # %bb.0: |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd %xmm0, %xmm0 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vzeroupper |
| ; AVX2-FAST-PERLANE-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,5,0,7] |
| ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0 |
| ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-FAST-NEXT: vzeroupper |
| ; AVX2-FAST-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm0 = [0,5,0,7] |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpermd %zmm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm0 = [0,5,0,7] |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpermd %zmm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,5,0,7] |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpermd %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 0, i32 7> |
| %out.bytevec = bitcast <4 x i32> %broadcast.of.zextinreg to <16 x i8> |
| %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
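| ; Note (annotation, not autogenerated): the shuffle below interleaves byte 0 of the input with the odd bytes of its upper 32 bytes. |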
| define void @vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: psrlw $8, %xmm1 |
| ; SSE2-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] |
| ; SSE2-NEXT: psrlw $8, %xmm2 |
| ; SSE2-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u> |
| ; SSE42-NEXT: pshufb %xmm3, %xmm1 |
| ; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE42-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] |
| ; SSE42-NEXT: pshufb %xmm3, %xmm2 |
| ; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm4 |
| ; SSE42-NEXT: movdqa %xmm4, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15] |
| ; AVX-NEXT: # xmm3 = mem[0,0] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1 |
| ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm2 |
| ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1 |
| ; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1 |
| ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1 |
| ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i8_widen_to_i16_factor2_broadcast_to_v16i16_factor16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %ymm0 |
| ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 0, i32 35, i32 0, i32 37, i32 0, i32 39, i32 0, i32 41, i32 0, i32 43, i32 0, i32 45, i32 0, i32 47, i32 0, i32 49, i32 0, i32 51, i32 0, i32 53, i32 0, i32 55, i32 0, i32 57, i32 0, i32 59, i32 0, i32 61, i32 0, i32 63> |
| %out.bytevec.padded = shufflevector <32 x i8> %broadcast.of.zextinreg, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm3, %xmm1 |
| ; SSE2-NEXT: por %xmm2, %xmm3 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm3 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0] |
| ; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; SSE42-NEXT: pshufb %xmm1, %xmm3 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm0 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; SSE42-NEXT: pshufb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,1,2,15,4,5,6,15,8,9,10,15,12,13,14] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastd %xmm1, %ymm1 |
| ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastd %xmm1, %ymm1 |
| ; AVX512F-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastd %xmm1, %ymm1 |
| ; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i8_widen_to_i32_factor4_broadcast_to_v8i32_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vpbroadcastd %xmm0, %ymm0 |
| ; AVX512BW-NEXT: movl $286331153, %eax # imm = 0x11111111 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 0, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 0, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 0, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 0, i32 61, i32 62, i32 63> |
| %out.bytevec.padded = shufflevector <32 x i8> %broadcast.of.zextinreg, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm3, %xmm1 |
| ; SSE2-NEXT: por %xmm2, %xmm3 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm3 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0] |
| ; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; SSE42-NEXT: pshufb %xmm1, %xmm3 |
| ; SSE42-NEXT: palignr {{.*#+}} xmm0 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; SSE42-NEXT: pshufb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1 |
| ; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i8_widen_to_i64_factor8_broadcast_to_v4i64_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vpbroadcastq %xmm0, %ymm0 |
| ; AVX512BW-NEXT: movl $16843009, %eax # imm = 0x1010101 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> |
| %out.bytevec.padded = shufflevector <32 x i8> %broadcast.of.zextinreg, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
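| ; Note (annotation, not autogenerated): byte 0 of the input replaces the first byte of each 16-byte lane taken from the upper 32 bytes. |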
| define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm3, %xmm1 |
| ; SSE2-NEXT: por %xmm3, %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm3 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm3 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE42-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm4 |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb (%rdx), %xmm4 |
| ; SSE42-NEXT: movdqa %xmm4, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm1 |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm2 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm2 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512BW-NEXT: movl $65537, %eax # imm = 0x10001 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> |
| %out.bytevec.padded = shufflevector <32 x i8> %broadcast.of.zextinreg, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15] |
| ; SSE42-NEXT: pshufb %xmm3, %xmm1 |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE42-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] |
| ; SSE42-NEXT: pshufb %xmm3, %xmm2 |
| ; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm4 |
| ; SSE42-NEXT: movdqa %xmm4, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1 |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; AVX-NEXT: vpshufb %xmm3, %xmm2, %xmm2 |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7],ymm1[8],ymm0[9],ymm1[10],ymm0[11],ymm1[12],ymm0[13],ymm1[14],ymm0[15] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastw %xmm1, %ymm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7],ymm1[8],ymm0[9],ymm1[10],ymm0[11],ymm1[12],ymm0[13],ymm1[14],ymm0[15] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastw %xmm1, %ymm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7],ymm1[8],ymm0[9],ymm1[10],ymm0[11],ymm1[12],ymm0[13],ymm1[14],ymm0[15] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31] |
| ; AVX512BW-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 0, i32 19, i32 0, i32 21, i32 0, i32 23, i32 0, i32 25, i32 0, i32 27, i32 0, i32 29, i32 0, i32 31> |
| %out.bytevec = bitcast <16 x i16> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,65535,0,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm3, %xmm1 |
| ; SSE2-NEXT: por %xmm2, %xmm3 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7],ymm1[8],ymm0[9,10,11],ymm1[12],ymm0[13,14,15] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7],ymm1[8],ymm0[9,10,11],ymm1[12],ymm0[13,14,15] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7],ymm1[8],ymm0[9,10,11],ymm1[12],ymm0[13,14,15] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i16_widen_to_i64_factor4_broadcast_to_v4i64_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,1,2,3,16,5,6,7,16,9,10,11,16,13,14,15] |
| ; AVX512BW-NEXT: vpermi2w %ymm0, %ymm1, %ymm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 0, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 0, i32 29, i32 30, i32 31> |
| %out.bytevec = bitcast <16 x i16> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,65535,65535,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm3, %xmm1 |
| ; SSE2-NEXT: pand %xmm3, %xmm2 |
| ; SSE2-NEXT: pandn %xmm0, %xmm3 |
| ; SSE2-NEXT: por %xmm3, %xmm1 |
| ; SSE2-NEXT: por %xmm3, %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec256_i16_widen_to_i128_factor8_broadcast_to_v2i128_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,1,2,3,4,5,6,7,16,9,10,11,12,13,14,15] |
| ; AVX512BW-NEXT: vpermi2w %ymm0, %ymm1, %ymm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> |
| %out.bytevec = bitcast <16 x i16> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
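| ; Note (annotation, not autogenerated): after bitcasting to <16 x i32>, element 0 is interleaved with the odd i32 elements of the upper 256 bits. |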
| define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm3 |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[1,3],ymm0[4,4],ymm1[5,7] |
| ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,9,0,11,0,13,0,15] |
| ; AVX512F-FAST-NEXT: vpermi2d %ymm0, %ymm1, %ymm2 |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,9,0,11,0,13,0,15] |
| ; AVX512DQ-FAST-NEXT: vpermi2d %ymm0, %ymm1, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastq %xmm0, %ymm0 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermt2d %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 0, i32 11, i32 0, i32 13, i32 0, i32 15> |
| %out.bytevec = bitcast <8 x i32> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] |
| ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [8,1,2,3,8,5,6,7] |
| ; AVX512F-FAST-NEXT: vpermi2d %ymm0, %ymm1, %ymm2 |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [8,1,2,3,8,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vpermi2d %ymm0, %ymm1, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i128_factor4_broadcast_to_v2i128_factor2: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,9,10,11,0,13,14,15] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15> |
| %out.bytevec = bitcast <8 x i32> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[0],xmm2[1] |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512F-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,0,7] |
| ; AVX512F-FAST-NEXT: vpermi2q %ymm1, %ymm0, %ymm2 |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,0,7] |
| ; AVX512DQ-FAST-NEXT: vpermi2q %ymm1, %ymm0, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastq %xmm0, %ymm0 |
| ; AVX512BW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec256_i64_widen_to_i128_factor2_broadcast_to_v2i128_factor2: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,0,7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermq %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64> |
| %broadcast.of.zextinreg = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 0, i32 7> |
| %out.bytevec = bitcast <4 x i64> %broadcast.of.zextinreg to <32 x i8> |
| %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] |
| ; SSE2-NEXT: pand %xmm2, %xmm3 |
| ; SSE2-NEXT: pandn %xmm1, %xmm2 |
| ; SSE2-NEXT: por %xmm3, %xmm2 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm2 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] |
| ; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,u,0,u,0,u,0,u,0,u,0,u,0,u,0,u,16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0 |
| ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %ymm0 |
| ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %ymm0 |
| ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 0, i32 51, i32 0, i32 53, i32 0, i32 55, i32 0, i32 57, i32 0, i32 59, i32 0, i32 61, i32 0, i32 63, i32 0, i32 65, i32 0, i32 67, i32 0, i32 69, i32 0, i32 71, i32 0, i32 73, i32 0, i32 75, i32 0, i32 77, i32 0, i32 79, i32 0, i32 81, i32 0, i32 83, i32 0, i32 85, i32 0, i32 87, i32 0, i32 89, i32 0, i32 91, i32 0, i32 93, i32 0, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,0,0,255,0,0,255,0,0,255,0,0,255,0,0] |
| ; SSE2-NEXT: pand %xmm0, %xmm1 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pxor %xmm0, %xmm0 |
| ; SSE42-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE42-NEXT: pshufb %xmm0, %xmm3 |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm2 |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm2, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastb %xmm1, %xmm2 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero |
| ; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] |
| ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15] |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15] |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 0, i32 52, i32 53, i32 0, i32 55, i32 56, i32 0, i32 58, i32 59, i32 0, i32 61, i32 62, i32 0, i32 64, i32 65, i32 0, i32 67, i32 68, i32 0, i32 70, i32 71, i32 0, i32 73, i32 74, i32 0, i32 76, i32 77, i32 0, i32 79, i32 80, i32 0, i32 82, i32 83, i32 0, i32 85, i32 86, i32 0, i32 88, i32 89, i32 0, i32 91, i32 92, i32 0, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0] |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] |
| ; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm2, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u,16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpbroadcastd %xmm0, %ymm3 |
| ; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255] |
| ; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpbroadcastd %xmm0, %ymm3 |
| ; AVX512DQ-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512DQ-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpbroadcastd %xmm0, %ymm2 |
| ; AVX512BW-NEXT: movl $286331153, %eax # imm = 0x11111111 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 0, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 0, i32 61, i32 62, i32 63, i32 0, i32 65, i32 66, i32 67, i32 0, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 0, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 0, i32 85, i32 86, i32 87, i32 0, i32 89, i32 90, i32 91, i32 0, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0] |
| ; SSE2-NEXT: pand %xmm0, %xmm1 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm2, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastb %xmm1, %xmm2 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero |
| ; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255] |
| ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,15,6,7,8,9,10,15,12,13,14] |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,15,6,7,8,9,10,15,12,13,14] |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,15,6,7,8,9,10,15,12,13,14] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 0, i32 55, i32 56, i32 57, i32 58, i32 59, i32 0, i32 61, i32 62, i32 63, i32 64, i32 65, i32 0, i32 67, i32 68, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 76, i32 77, i32 0, i32 79, i32 80, i32 81, i32 82, i32 83, i32 0, i32 85, i32 86, i32 87, i32 88, i32 89, i32 0, i32 91, i32 92, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,0,1] |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX-NEXT: # xmm3 = mem[0,0] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm2, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u,16],zero,zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpbroadcastq %xmm0, %ymm3 |
| ; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpbroadcastq %xmm0, %ymm3 |
| ; AVX512DQ-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512DQ-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX512BW-NEXT: movl $16843009, %eax # imm = 0x1010101 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm2, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 0, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm3, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0] |
| ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11] |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0] |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255] |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm3, %xmm1, %xmm2, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastb %xmm1, %xmm2 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255] |
| ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,5,6,7,8,9,10,15,12,13,14] |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,5,6,7,8,9,10,15,12,13,14] |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,5,6,7,8,9,10,15,12,13,14] |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %xmm2 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 0, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 0, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE42-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm1 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1 |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpandn %ymm3, %ymm2, %ymm2 |
| ; AVX512DQ-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: movw $1, %ax |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1} {z} |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm2 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm2 |
| ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512BW-NEXT: movl $65537, %eax # imm = 0x10001 |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1} |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0] |
| ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movaps 32(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm2 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm2 |
| ; SSE42-NEXT: paddb (%rsi), %xmm1 |
| ; SSE42-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE42-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm3 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero |
| ; SSE42-NEXT: movaps 32(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX-NEXT: vmovaps 32(%rdx), %ymm2 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastb %xmm1, %ymm2 |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1 |
| ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX2-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 |
| ; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 |
| ; AVX512DQ-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512DQ-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX512BW-NEXT: movw $1, %ax |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1} |
| ; AVX512BW-NEXT: vpbroadcastb %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 |
| ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95> |
| %out.bytevec.padded = shufflevector <48 x i8> %broadcast.of.zextinreg, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u] |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE42-NEXT: paddb (%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm2, %xmm1 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15] |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15] |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15] |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31,0,41,0,43,0,45,0,47] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31,0,41,0,43,0,45,0,47] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,xmm0[0,1],zero,zero,xmm0[0,1],zero,zero,xmm0[0,1],zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 0, i32 27, i32 0, i32 29, i32 0, i32 31, i32 0, i32 33, i32 0, i32 35, i32 0, i32 37, i32 0, i32 39, i32 0, i32 41, i32 0, i32 43, i32 0, i32 45, i32 0, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,0,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,0,65535,0,0,65535,0,0] |
| ; SSE2-NEXT: pand %xmm0, %xmm1 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pxor %xmm3, %xmm3 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2],xmm3[3,4],xmm0[5],xmm3[6,7] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm3 |
| ; SSE42-NEXT: movdqa %xmm3, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7] |
| ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7] |
| ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm1 |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-SLOW-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7] |
| ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14],ymm0[15] |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-FAST-NEXT: vpbroadcastw %xmm0, %ymm2 |
| ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7] |
| ; AVX512F-FAST-NEXT: vpxor %xmm3, %xmm3, %xmm3 |
| ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7],ymm3[8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14],ymm2[15] |
| ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7] |
| ; AVX512DQ-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14],ymm0[15] |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-FAST-NEXT: vpbroadcastw %xmm0, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7] |
| ; AVX512DQ-FAST-NEXT: vpxor %xmm3, %xmm3, %xmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7],ymm3[8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13,14],ymm2[15] |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,0,28,29,0,31,40,0,42,43,0,45,46,0] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,0,28,29,0,31,40,0,42,43,0,45,46,0] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 0, i32 28, i32 29, i32 0, i32 31, i32 32, i32 0, i32 34, i32 35, i32 0, i32 37, i32 38, i32 0, i32 40, i32 41, i32 0, i32 43, i32 44, i32 0, i32 46, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm2, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-SLOW-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-SLOW-NEXT: vzeroupper |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-PERLANE-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX2-FAST-PERLANE: # %bb.0: |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vzeroupper |
| ; AVX2-FAST-PERLANE-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-FAST-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero |
| ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-NEXT: vzeroupper |
| ; AVX2-FAST-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-SLOW-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX512F-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-FAST-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX512F-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX512DQ-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVX512DQ-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,0,29,30,31,0,41,42,43,0,45,46,47] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7] |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,0,29,30,31,0,41,42,43,0,45,46,47] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 0, i32 29, i32 30, i32 31, i32 0, i32 33, i32 34, i32 35, i32 0, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 0, i32 45, i32 46, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,0,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pandn %xmm3, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1] |
| ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11] |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] |
| ; SSE42-NEXT: pxor %xmm3, %xmm3 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5,6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] |
| ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3,4,5,6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7] |
| ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm1 |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-SLOW-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7] |
| ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15] |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7] |
| ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512F-FAST-NEXT: vpbroadcastw %xmm0, %ymm2 |
| ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7] |
| ; AVX512F-FAST-NEXT: vpxor %xmm3, %xmm3, %xmm3 |
| ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4],ymm3[5,6,7,8,9,10,11],ymm2[12],ymm3[13,14,15] |
| ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7] |
| ; AVX512DQ-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15] |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512DQ-FAST-NEXT: vpbroadcastw %xmm0, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7] |
| ; AVX512DQ-FAST-NEXT: vpxor %xmm3, %xmm3, %xmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4],ymm3[5,6,7,8,9,10,11],ymm2[12],ymm3[13,14,15] |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,28,29,0,31,40,41,42,43,0,45,46,47] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastw %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7] |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,28,29,0,31,40,41,42,43,0,45,46,47] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 0, i32 31, i32 32, i32 33, i32 34, i32 35, i32 0, i32 37, i32 38, i32 39, i32 40, i32 41, i32 0, i32 43, i32 44, i32 45, i32 46, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm2, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,28,29,30,31,0,41,42,43,44,45,46,47] |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7] |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] |
| ; SSE2-NEXT: pand %xmm2, %xmm1 |
| ; SSE2-NEXT: pandn %xmm0, %xmm2 |
| ; SSE2-NEXT: por %xmm1, %xmm2 |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1] |
| ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movaps 32(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4],xmm2[5,6,7] |
| ; SSE42-NEXT: movaps 32(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE42-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vmovaps 32(%rdx), %ymm2 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15] |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm0 |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15] |
| ; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512DQ-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,25,26,27,28,29,30,31,40,41,42,43,0,45,46,47] |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-NEXT: vpermt2w %zmm2, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa %ymm0, %ymm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 0, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47> |
| %out.bytevec = bitcast <24 x i16> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: paddb (%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm2, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 |
| ; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[1,3],ymm2[4,4],ymm1[5,7] |
| ; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm1[0,2,1,3] |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-SLOW-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-SLOW-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-SLOW-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7] |
| ; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] |
| ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-SLOW-NEXT: vzeroupper |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-PERLANE-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX2-FAST-PERLANE: # %bb.0: |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7] |
| ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7] |
| ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vzeroupper |
| ; AVX2-FAST-PERLANE-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-FAST-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-FAST-NEXT: vpbroadcastq %xmm0, %ymm2 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7] |
| ; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7] |
| ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero |
| ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-NEXT: vzeroupper |
| ; AVX2-FAST-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,0,15,0,21,0,23,0,25,0,27,u,u,u,u> |
| ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,0,15,0,21,0,23,0,25,0,27,u,u,u,u> |
| ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,13,0,15] |
| ; AVX512BW-SLOW-NEXT: vpermd %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7] |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,13,0,15] |
| ; AVX512BW-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7] |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 0, i32 15, i32 0, i32 17, i32 0, i32 19, i32 0, i32 21, i32 0, i32 23> |
| %out.bytevec = bitcast <12 x i32> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: xorps %xmm2, %xmm2 |
| ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[1,2] |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,1,1] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,0,1] |
| ; SSE2-NEXT: paddb (%rdx), %xmm0 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7] |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,0,1] |
| ; SSE42-NEXT: paddb (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6],ymm3[7] |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3] |
| ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0,1,1] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-SLOW-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,6,0] |
| ; AVX2-SLOW-NEXT: vpermd %ymm1, %ymm2, %ymm1 |
| ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7] |
| ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1] |
| ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-SLOW-NEXT: vzeroupper |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-PERLANE-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX2-FAST-PERLANE: # %bb.0: |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,6,0] |
| ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm1, %ymm2, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7] |
| ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-PERLANE-NEXT: vzeroupper |
| ; AVX2-FAST-PERLANE-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,6,0] |
| ; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1 |
| ; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7] |
| ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-FAST-NEXT: vzeroupper |
| ; AVX2-FAST-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,14,0,20,21,0,23,24,0,26,27,u,u,u,u> |
| ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,14,0,20,21,0,23,24,0,26,27,u,u,u,u> |
| ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,13,14,0] |
| ; AVX512BW-SLOW-NEXT: vpermd %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3] |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7] |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,13,14,0] |
| ; AVX512BW-FAST-NEXT: vpermd %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7] |
| ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 0, i32 16, i32 17, i32 0, i32 19, i32 20, i32 0, i32 22, i32 23> |
| %out.bytevec = bitcast <12 x i32> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] |
| ; SSE2-NEXT: xorps %xmm2, %xmm2 |
| ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb %xmm2, %xmm0 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm2, %xmm0 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7] |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3] |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4,5,6,7] |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7] |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,14,15,0,21,22,23,0,25,26,27,u,u,u,u> |
| ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,13,14,15,0,21,22,23,0,25,26,27,u,u,u,u> |
| ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,13,14,15,0,1,2,3] |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpermd %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7] |
| ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 15, i32 0, i32 17, i32 18, i32 19, i32 0, i32 21, i32 22, i32 23> |
| %out.bytevec = bitcast <12 x i32> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] |
| ; SSE2-NEXT: xorps %xmm2, %xmm2 |
| ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3] |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,0,1] |
| ; SSE2-NEXT: movaps 32(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps %xmm2, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: pxor %xmm2, %xmm2 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7] |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,0,1] |
| ; SSE42-NEXT: movaps 32(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: movaps %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6],ymm2[7] |
| ; AVX-NEXT: vmovaps 32(%rdx), %ymm2 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,5,6,7] |
| ; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6],ymm1[7] |
| ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [16,29,30,31,4,5,16,7,16,29,30,31,4,5,16,7] |
| ; AVX512F-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vbroadcasti32x8 {{.*#+}} zmm1 = [16,29,30,31,4,5,16,7,16,29,30,31,4,5,16,7] |
| ; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512DQ-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,13,14,15] |
| ; AVX512BW-NEXT: vpermd %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6],ymm1[7] |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 15, i32 16, i32 17, i32 0, i32 19, i32 20, i32 21, i32 22, i32 23> |
| %out.bytevec = bitcast <12 x i32> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; SSE42-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 |
| ; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,3,0,3] |
| ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,7,0,11,0,13,u,u> |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,7,0,11,0,13,u,u> |
| ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,0,11] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm2, %zmm0, %zmm1 |
| ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,7,0,7] |
| ; AVX512BW-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermq %zmm0, %zmm1, %zmm1 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64> |
| %broadcast.of.zextinreg = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <6 x i32> <i32 0, i32 7, i32 0, i32 9, i32 0, i32 11> |
| %out.bytevec = bitcast <6 x i64> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE2-NEXT: movaps 32(%rdx), %xmm2 |
| ; SSE2-NEXT: paddb (%rdx), %xmm1 |
| ; SSE2-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps %xmm2, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: movdqa 48(%rdi), %xmm1 |
| ; SSE42-NEXT: paddb 48(%rsi), %xmm1 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; SSE42-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] |
| ; SSE42-NEXT: movaps 32(%rdx), %xmm2 |
| ; SSE42-NEXT: paddb (%rdx), %xmm1 |
| ; SSE42-NEXT: paddb 16(%rdx), %xmm0 |
| ; SSE42-NEXT: movaps %xmm2, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 48(%rdi), %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[2],ymm0[2] |
| ; AVX-NEXT: vmovaps 32(%rdx), %ymm2 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,0] |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] |
| ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [0,7,10,0,0,7,10,0] |
| ; AVX512F-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vbroadcasti32x8 {{.*#+}} zmm2 = [0,7,10,0,0,7,10,0] |
| ; AVX512DQ-NEXT: # zmm2 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] |
| ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm0 |
| ; AVX512DQ-NEXT: vmovaps 32(%rdx), %ymm1 |
| ; AVX512DQ-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,10,0] |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512BW-SLOW-NEXT: vpermt2q %zmm2, %zmm1, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm0, %ymm0 |
| ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,2,0] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpermq %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] |
| ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64> |
| %broadcast.of.zextinreg = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <6 x i32> <i32 0, i32 7, i32 8, i32 0, i32 10, i32 11> |
| %out.bytevec = bitcast <6 x i64> %broadcast.of.zextinreg to <48 x i8> |
| %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[0],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i8_widen_to_i16_factor2_broadcast_to_v32i16_factor32: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[0],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[16],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[32],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero,zmm0[48],zero |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 0, i32 67, i32 0, i32 69, i32 0, i32 71, i32 0, i32 73, i32 0, i32 75, i32 0, i32 77, i32 0, i32 79, i32 0, i32 81, i32 0, i32 83, i32 0, i32 85, i32 0, i32 87, i32 0, i32 89, i32 0, i32 91, i32 0, i32 93, i32 0, i32 95, i32 0, i32 97, i32 0, i32 99, i32 0, i32 101, i32 0, i32 103, i32 0, i32 105, i32 0, i32 107, i32 0, i32 109, i32 0, i32 111, i32 0, i32 113, i32 0, i32 115, i32 0, i32 117, i32 0, i32 119, i32 0, i32 121, i32 0, i32 123, i32 0, i32 125, i32 0, i32 127> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %broadcast.of.zextinreg, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i8_widen_to_i32_factor4_broadcast_to_v16i32_factor16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0],zero,zero,zero,zmm0[0],zero,zero,zero,zmm0[0],zero,zero,zero,zmm0[0],zero,zero,zero,zmm0[16],zero,zero,zero,zmm0[16],zero,zero,zero,zmm0[16],zero,zero,zero,zmm0[16],zero,zero,zero,zmm0[32],zero,zero,zero,zmm0[32],zero,zero,zero,zmm0[32],zero,zero,zero,zmm0[32],zero,zero,zero,zmm0[48],zero,zero,zero,zmm0[48],zero,zero,zero,zmm0[48],zero,zero,zero,zmm0[48],zero,zero,zero |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 0, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 0, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 0, i32 85, i32 86, i32 87, i32 0, i32 89, i32 90, i32 91, i32 0, i32 93, i32 94, i32 95, i32 0, i32 97, i32 98, i32 99, i32 0, i32 101, i32 102, i32 103, i32 0, i32 105, i32 106, i32 107, i32 0, i32 109, i32 110, i32 111, i32 0, i32 113, i32 114, i32 115, i32 0, i32 117, i32 118, i32 119, i32 0, i32 121, i32 122, i32 123, i32 0, i32 125, i32 126, i32 127> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %broadcast.of.zextinreg, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i8_widen_to_i64_factor8_broadcast_to_v8i64_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0],zero,zero,zero,zero,zero,zero,zero,zmm0[0],zero,zero,zero,zero,zero,zero,zero,zmm0[16],zero,zero,zero,zero,zero,zero,zero,zmm0[16],zero,zero,zero,zero,zero,zero,zero,zmm0[32],zero,zero,zero,zero,zero,zero,zero,zmm0[32],zero,zero,zero,zero,zero,zero,zero,zmm0[48],zero,zero,zero,zero,zero,zero,zero,zmm0[48],zero,zero,zero,zero,zero,zero,zero |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 0, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 0, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 0, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 0, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 0, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 0, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %broadcast.of.zextinreg, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE-NEXT: paddb (%rsi), %xmm0 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE-NEXT: paddb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE-NEXT: paddb %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE-NEXT: paddb %xmm0, %xmm3 |
| ; SSE-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i8_widen_to_i128_factor16_broadcast_to_v4i128_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1] |
| ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 0, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 0, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 0, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %broadcast.of.zextinreg, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE-NEXT: paddb (%rsi), %xmm0 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE-NEXT: movaps 16(%rdx), %xmm1 |
| ; SSE-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE-NEXT: paddb %xmm0, %xmm3 |
| ; SSE-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE-NEXT: movaps %xmm1, 16(%rcx) |
| ; SSE-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps 16(%rdx), %xmm2 |
| ; AVX-NEXT: vmovaps 48(%rdx), %xmm3 |
| ; AVX-NEXT: vmovaps %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovaps %xmm3, 48(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0] |
| ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0] |
| ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0] |
| ; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i8_widen_to_i256_factor32_broadcast_to_v2i256_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %broadcast.of.zextinreg = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 0, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %broadcast.of.zextinreg, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] |
| ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[0,1],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero,ymm0[16,17],zero,zero |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i16_widen_to_i32_factor2_broadcast_to_v16i32_factor16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,33,0,35,0,37,0,39,0,41,0,43,0,45,0,47,0,49,0,51,0,53,0,55,0,57,0,59,0,61,0,63] |
| ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 0, i32 35, i32 0, i32 37, i32 0, i32 39, i32 0, i32 41, i32 0, i32 43, i32 0, i32 45, i32 0, i32 47, i32 0, i32 49, i32 0, i32 51, i32 0, i32 53, i32 0, i32 55, i32 0, i32 57, i32 0, i32 59, i32 0, i32 61, i32 0, i32 63> |
| %out.bytevec = bitcast <32 x i16> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7] |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i16_widen_to_i64_factor4_broadcast_to_v8i64_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,33,34,35,0,37,38,39,0,41,42,43,0,45,46,47,0,49,50,51,0,53,54,55,0,57,58,59,0,61,62,63] |
| ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 0, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 0, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 0, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 0, i32 61, i32 62, i32 63> |
| %out.bytevec = bitcast <32 x i16> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm1, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm1, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i16_widen_to_i128_factor8_broadcast_to_v4i128_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,33,34,35,36,37,38,39,0,41,42,43,44,45,46,47,0,49,50,51,52,53,54,55,0,57,58,59,60,61,62,63] |
| ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 0, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 0, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> |
| %out.bytevec = bitcast <32 x i16> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: movaps 16(%rdx), %xmm1 |
| ; SSE2-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE2-NEXT: movaps %xmm1, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; SSE42-NEXT: movaps 16(%rdx), %xmm0 |
| ; SSE42-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm1, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE42-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7] |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps 16(%rdx), %xmm2 |
| ; AVX-NEXT: vmovaps 48(%rdx), %xmm3 |
| ; AVX-NEXT: vmovaps %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovaps %xmm3, 48(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0] |
| ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0] |
| ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0] |
| ; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16> |
| %broadcast.of.zextinreg = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> |
| %out.bytevec = bitcast <32 x i16> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: pxor %xmm1, %xmm1 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE2-NEXT: paddb %xmm0, %xmm1 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm0, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm0, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE42-NEXT: paddb %xmm0, %xmm1 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm0, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm0, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE42-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[1,3],ymm0[4,4],ymm1[5,7] |
| ; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 48(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7] |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31] |
| ; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31] |
| ; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31] |
| ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 0, i32 19, i32 0, i32 21, i32 0, i32 23, i32 0, i32 25, i32 0, i32 27, i32 0, i32 29, i32 0, i32 31> |
| %out.bytevec = bitcast <16 x i32> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: xorps %xmm1, %xmm1 |
| ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] |
| ; SSE2-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE2-NEXT: paddb %xmm1, %xmm0 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE2-NEXT: paddb %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm1, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE2-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE2-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: movdqa 16(%rdx), %xmm0 |
| ; SSE42-NEXT: paddb %xmm1, %xmm0 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE42-NEXT: paddb %xmm1, %xmm2 |
| ; SSE42-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm1, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE42-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE42-NEXT: movdqa %xmm0, 16(%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 48(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,3,16,5,6,7,16,9,10,11,16,13,14,15] |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,3,16,5,6,7,16,9,10,11,16,13,14,15] |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i32_widen_to_i128_factor4_broadcast_to_v4i128_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,17,18,19,0,21,22,23,0,25,26,27,0,29,30,31] |
| ; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 0, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 0, i32 29, i32 30, i32 31> |
| %out.bytevec = bitcast <16 x i32> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE2-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE2-NEXT: paddb (%rsi), %xmm0 |
| ; SSE2-NEXT: xorps %xmm1, %xmm1 |
| ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] |
| ; SSE2-NEXT: movaps 16(%rdx), %xmm0 |
| ; SSE2-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE2-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE2-NEXT: paddb %xmm1, %xmm3 |
| ; SSE2-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE2-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE2-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE2-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE2-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE2-NEXT: retq |
| ; |
| ; SSE42-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; SSE42: # %bb.0: |
| ; SSE42-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE42-NEXT: paddb (%rsi), %xmm0 |
| ; SSE42-NEXT: pxor %xmm1, %xmm1 |
| ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; SSE42-NEXT: movaps 16(%rdx), %xmm0 |
| ; SSE42-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE42-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE42-NEXT: paddb %xmm1, %xmm3 |
| ; SSE42-NEXT: paddb 32(%rdx), %xmm1 |
| ; SSE42-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE42-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE42-NEXT: movdqa %xmm1, 32(%rcx) |
| ; SSE42-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE42-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps 16(%rdx), %xmm2 |
| ; AVX-NEXT: vmovaps 48(%rdx), %xmm3 |
| ; AVX-NEXT: vmovaps %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovaps %xmm3, 48(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7] |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,3,4,5,6,7,16,9,10,11,12,13,14,15] |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,3,4,5,6,7,16,9,10,11,12,13,14,15] |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpermt2d %zmm0, %zmm1, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i32_widen_to_i256_factor8_broadcast_to_v2i256_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32> |
| %broadcast.of.zextinreg = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> |
| %out.bytevec = bitcast <16 x i32> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE-NEXT: paddb (%rsi), %xmm0 |
| ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero |
| ; SSE-NEXT: movdqa 16(%rdx), %xmm1 |
| ; SSE-NEXT: paddb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa (%rdx), %xmm2 |
| ; SSE-NEXT: paddb %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa 48(%rdx), %xmm3 |
| ; SSE-NEXT: paddb %xmm0, %xmm3 |
| ; SSE-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE-NEXT: movdqa %xmm3, 48(%rcx) |
| ; SSE-NEXT: movdqa %xmm2, (%rcx) |
| ; SSE-NEXT: movdqa %xmm1, 16(%rcx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 |
| ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm2 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm3 |
| ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm2, 48(%rcx) |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm1, %xmm1 |
| ; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 |
| ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7] |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,0,11,0,13,0,15] |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,0,11,0,13,0,15] |
| ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i64_widen_to_i128_factor2_broadcast_to_v4i128_factor4: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,0,11,0,13,0,15] |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64> |
| %broadcast.of.zextinreg = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 0, i32 11, i32 0, i32 13, i32 0, i32 15> |
| %out.bytevec = bitcast <8 x i64> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE-NEXT: paddb (%rsi), %xmm0 |
| ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero |
| ; SSE-NEXT: movaps 16(%rdx), %xmm1 |
| ; SSE-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE-NEXT: paddb %xmm0, %xmm3 |
| ; SSE-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE-NEXT: movaps %xmm1, 16(%rcx) |
| ; SSE-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps 16(%rdx), %xmm2 |
| ; AVX-NEXT: vmovaps 48(%rdx), %xmm3 |
| ; AVX-NEXT: vmovaps %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovaps %xmm3, 48(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8,1,2,3,8,5,6,7] |
| ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8,1,2,3,8,5,6,7] |
| ; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i64_widen_to_i256_factor4_broadcast_to_v2i256_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64> |
| %broadcast.of.zextinreg = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15> |
| %out.bytevec = bitcast <8 x i64> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| |
| define void @vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { |
| ; SSE-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm0 |
| ; SSE-NEXT: paddb (%rsi), %xmm0 |
| ; SSE-NEXT: movaps 16(%rdx), %xmm1 |
| ; SSE-NEXT: movaps 48(%rdx), %xmm2 |
| ; SSE-NEXT: movdqa (%rdx), %xmm3 |
| ; SSE-NEXT: paddb %xmm0, %xmm3 |
| ; SSE-NEXT: paddb 32(%rdx), %xmm0 |
| ; SSE-NEXT: movaps %xmm2, 48(%rcx) |
| ; SSE-NEXT: movaps %xmm1, 16(%rcx) |
| ; SSE-NEXT: movdqa %xmm0, 32(%rcx) |
| ; SSE-NEXT: movdqa %xmm3, (%rcx) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm1 |
| ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0 |
| ; AVX-NEXT: vmovaps 16(%rdx), %xmm2 |
| ; AVX-NEXT: vmovaps 48(%rdx), %xmm3 |
| ; AVX-NEXT: vmovaps %xmm2, 16(%rcx) |
| ; AVX-NEXT: vmovaps %xmm3, 48(%rcx) |
| ; AVX-NEXT: vmovdqa %xmm0, (%rcx) |
| ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx) |
| ; AVX-NEXT: retq |
| ; |
| ; AVX2-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm1 |
| ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0 |
| ; AVX2-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512F-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,10,11,0,1,14,15] |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512DQ-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; AVX512DQ: # %bb.0: |
| ; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,10,11,0,1,14,15] |
| ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm2, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0 |
| ; AVX512DQ-NEXT: vpaddb (%rdx), %ymm2, %ymm1 |
| ; AVX512DQ-NEXT: vmovdqa %ymm1, (%rcx) |
| ; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx) |
| ; AVX512DQ-NEXT: vzeroupper |
| ; AVX512DQ-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: vec512_i128_widen_to_i256_factor2_broadcast_to_v2i256_factor2: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64 |
| %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64 |
| %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias |
| %in.vec.cast = bitcast <64 x i8> %in.vec to <4 x i128> |
| %broadcast.of.zextinreg = shufflevector <4 x i128> %in.vec.cast, <4 x i128> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 0, i32 7> |
| %out.bytevec = bitcast <4 x i128> %broadcast.of.zextinreg to <64 x i8> |
| %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64 |
| %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias |
| store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64 |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX1-ONLY: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK13: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |