| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12 |
| |
; These patterns are produced by the LoopVectorizer for interleaved loads.
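; For example, a scalar loop of the form (illustrative only, not part of the test)
;   for (i = 0; i < n; ++i) {
;     out0[i] = in[6*i + 0];
;     out1[i] = in[6*i + 1];
;     ...
;     out5[i] = in[6*i + 5];
;   }
; is vectorized into one wide load of the interleaved data followed by a
; stride-6 shufflevector per output stream, which is exactly the IR tested
; in the load_i8_stride6_vf* functions below.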
| |
| define void @load_i8_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm1, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: pxor %xmm4, %xmm4 |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,2,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: psrlq $48, %xmm1 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movd %xmm2, %edi |
| ; SSE-NEXT: movw %di, (%rsi) |
| ; SSE-NEXT: movd %xmm5, %esi |
| ; SSE-NEXT: movw %si, (%rdx) |
| ; SSE-NEXT: movd %xmm6, %edx |
| ; SSE-NEXT: movw %dx, (%rcx) |
| ; SSE-NEXT: movd %xmm4, %ecx |
| ; SSE-NEXT: movw %cx, (%r8) |
| ; SSE-NEXT: movd %xmm3, %ecx |
| ; SSE-NEXT: movw %cx, (%r9) |
| ; SSE-NEXT: movd %xmm0, %ecx |
| ; SSE-NEXT: movw %cx, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: load_i8_stride6_vf2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[3,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpextrw $0, %xmm1, (%rsi) |
| ; AVX-NEXT: vpextrw $0, %xmm2, (%rdx) |
| ; AVX-NEXT: vpextrw $0, %xmm3, (%rcx) |
| ; AVX-NEXT: vpextrw $0, %xmm4, (%r8) |
| ; AVX-NEXT: vpextrw $0, %xmm5, (%r9) |
| ; AVX-NEXT: vpextrw $0, %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %wide.vec = load <12 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 0, i32 6> |
| %strided.vec1 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 1, i32 7> |
| %strided.vec2 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 2, i32 8> |
| %strided.vec3 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 3, i32 9> |
| %strided.vec4 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 4, i32 10> |
| %strided.vec5 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <2 x i32> <i32 5, i32 11> |
| store <2 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <2 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <2 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <2 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <2 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <2 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| |
| define void @load_i8_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa (%rdi), %xmm3 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535] |
| ; SSE-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE-NEXT: pand %xmm1, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm2, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: pxor %xmm4, %xmm4 |
| ; SSE-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: movdqa %xmm0, %xmm7 |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm3[2,3] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,0,65535,65535,0,65535,65535] |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: pandn %xmm3, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm3, %xmm9 |
| ; SSE-NEXT: psrld $16, %xmm9 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3] |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: por %xmm6, %xmm5 |
| ; SSE-NEXT: movaps %xmm7, %xmm6 |
| ; SSE-NEXT: andps %xmm2, %xmm6 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,2],xmm3[0,3] |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3] |
| ; SSE-NEXT: packuswb %xmm7, %xmm7 |
| ; SSE-NEXT: por %xmm0, %xmm8 |
| ; SSE-NEXT: pand %xmm8, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm8[2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: movd %xmm1, (%rsi) |
| ; SSE-NEXT: movd %xmm5, (%rdx) |
| ; SSE-NEXT: movd %xmm6, (%rcx) |
| ; SSE-NEXT: movd %xmm7, (%r8) |
| ; SSE-NEXT: movd %xmm0, (%r9) |
| ; SSE-NEXT: movd %xmm2, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: load_i8_stride6_vf4: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm1[2,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,6,12],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm1[3,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,7,13],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm1[4,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[2,8,14],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm1[5,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[3,9,15],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; AVX-NEXT: vmovd %xmm2, (%rsi) |
| ; AVX-NEXT: vmovd %xmm3, (%rdx) |
| ; AVX-NEXT: vmovd %xmm4, (%rcx) |
| ; AVX-NEXT: vmovd %xmm5, (%r8) |
| ; AVX-NEXT: vmovd %xmm6, (%r9) |
| ; AVX-NEXT: vmovd %xmm0, (%rax) |
| ; AVX-NEXT: retq |
| %wide.vec = load <24 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18> |
| %strided.vec1 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19> |
| %strided.vec2 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20> |
| %strided.vec3 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21> |
| %strided.vec4 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22> |
| %strided.vec5 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23> |
| store <4 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <4 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <4 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <4 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <4 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <4 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| |
| define void @load_i8_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf8: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa (%rdi), %xmm4 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pand %xmm8, %xmm1 |
| ; SSE-NEXT: pandn %xmm3, %xmm8 |
| ; SSE-NEXT: por %xmm1, %xmm8 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: pand %xmm5, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535] |
| ; SSE-NEXT: pand %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm0, %xmm7 |
| ; SSE-NEXT: pand %xmm5, %xmm7 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm9, %xmm9 |
| ; SSE-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE-NEXT: pandn %xmm9, %xmm2 |
| ; SSE-NEXT: por %xmm6, %xmm2 |
| ; SSE-NEXT: pxor %xmm6, %xmm6 |
| ; SSE-NEXT: movdqa %xmm8, %xmm9 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm6[8],xmm9[9],xmm6[9],xmm9[10],xmm6[10],xmm9[11],xmm6[11],xmm9[12],xmm6[12],xmm9[13],xmm6[13],xmm9[14],xmm6[14],xmm9[15],xmm6[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,0,0,65535,65535] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm10, %xmm8 |
| ; SSE-NEXT: pandn %xmm9, %xmm10 |
| ; SSE-NEXT: por %xmm8, %xmm10 |
| ; SSE-NEXT: packuswb %xmm10, %xmm10 |
| ; SSE-NEXT: pand %xmm1, %xmm10 |
| ; SSE-NEXT: movdqa %xmm0, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] |
| ; SSE-NEXT: packuswb %xmm9, %xmm9 |
| ; SSE-NEXT: pandn %xmm9, %xmm1 |
| ; SSE-NEXT: por %xmm10, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,65535,65535,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm11, %xmm9 |
| ; SSE-NEXT: pandn %xmm3, %xmm9 |
| ; SSE-NEXT: movdqa %xmm4, %xmm12 |
| ; SSE-NEXT: pand %xmm11, %xmm12 |
| ; SSE-NEXT: por %xmm9, %xmm12 |
| ; SSE-NEXT: movdqa %xmm12, %xmm9 |
| ; SSE-NEXT: pand %xmm5, %xmm9 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm13, %xmm13 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm9, %xmm13 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm7[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm10[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm14, %xmm14 |
| ; SSE-NEXT: movdqa %xmm9, %xmm10 |
| ; SSE-NEXT: pandn %xmm14, %xmm10 |
| ; SSE-NEXT: por %xmm13, %xmm10 |
| ; SSE-NEXT: movdqa %xmm12, %xmm13 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3],xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [0,65535,65535,0,65535,65535,65535,65535] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm6[8],xmm12[9],xmm6[9],xmm12[10],xmm6[10],xmm12[11],xmm6[11],xmm12[12],xmm6[12],xmm12[13],xmm6[13],xmm12[14],xmm6[14],xmm12[15],xmm6[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm14, %xmm12 |
| ; SSE-NEXT: pandn %xmm13, %xmm14 |
| ; SSE-NEXT: por %xmm12, %xmm14 |
| ; SSE-NEXT: packuswb %xmm14, %xmm14 |
| ; SSE-NEXT: pand %xmm9, %xmm14 |
| ; SSE-NEXT: movdqa %xmm8, %xmm12 |
| ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm13 |
| ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm12[0,2] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm13[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm13, %xmm13 |
| ; SSE-NEXT: movdqa %xmm9, %xmm12 |
| ; SSE-NEXT: pandn %xmm13, %xmm12 |
| ; SSE-NEXT: por %xmm14, %xmm12 |
| ; SSE-NEXT: pand %xmm11, %xmm3 |
| ; SSE-NEXT: pandn %xmm4, %xmm11 |
| ; SSE-NEXT: por %xmm3, %xmm11 |
| ; SSE-NEXT: pand %xmm11, %xmm5 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm4 |
| ; SSE-NEXT: pand %xmm9, %xmm4 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: movdqa %xmm9, %xmm3 |
| ; SSE-NEXT: pandn %xmm5, %xmm3 |
| ; SSE-NEXT: por %xmm4, %xmm3 |
| ; SSE-NEXT: movdqa %xmm11, %xmm4 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3],xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm5, %xmm6 |
| ; SSE-NEXT: pandn %xmm4, %xmm5 |
| ; SSE-NEXT: por %xmm6, %xmm5 |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pand %xmm9, %xmm5 |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm8[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: pandn %xmm0, %xmm9 |
| ; SSE-NEXT: por %xmm5, %xmm9 |
| ; SSE-NEXT: movq %xmm2, (%rsi) |
| ; SSE-NEXT: movq %xmm1, (%rdx) |
| ; SSE-NEXT: movq %xmm10, (%rcx) |
| ; SSE-NEXT: movq %xmm12, (%r8) |
| ; SSE-NEXT: movq %xmm3, (%r9) |
| ; SSE-NEXT: movq %xmm9, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride6_vf8: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm2[2,8,14,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm4, %xmm3 |
| ; AVX1-ONLY-NEXT: vpxor %xmm4, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,xmm0[4,10,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[3,9,15,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,xmm0[5,11,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [0,1,2,3,4,128,128,128,0,1,2,3,4,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm5, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,zero,xmm0[0,6,12,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm5, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm7, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,xmm0[1,7,13,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[4,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,xmm2[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm8, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm9, %xmm8, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovq %xmm3, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovq %xmm4, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovq %xmm5, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovq %xmm7, (%r8) |
| ; AVX1-ONLY-NEXT: vmovq %xmm8, (%r9) |
| ; AVX1-ONLY-NEXT: vmovq %xmm0, (%rax) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-LABEL: load_i8_stride6_vf8: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15] |
| ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15] |
| ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[2,8,14],zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm5, %xmm3, %xmm3 |
| ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15] |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm5, %xmm7, %xmm5 |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15,u,u,u,u,u,u,u,u] |
| ; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vmovq %xmm4, (%rsi) |
| ; AVX2-NEXT: vmovq %xmm2, (%rdx) |
| ; AVX2-NEXT: vmovq %xmm6, (%rcx) |
| ; AVX2-NEXT: vmovq %xmm3, (%r8) |
| ; AVX2-NEXT: vmovq %xmm5, (%r9) |
| ; AVX2-NEXT: vmovq %xmm0, (%rax) |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| %wide.vec = load <48 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42> |
| %strided.vec1 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43> |
| %strided.vec2 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44> |
| %strided.vec3 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45> |
| %strided.vec4 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46> |
| %strided.vec5 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47> |
| store <8 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <8 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <8 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <8 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <8 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <8 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| |
| define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf16: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa 64(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa (%rdi), %xmm5 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm11 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm10 |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm3, %xmm7 |
| ; SSE-NEXT: pandn %xmm10, %xmm7 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: pandn %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pandn %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm3, %xmm6 |
| ; SSE-NEXT: por %xmm7, %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pand %xmm7, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm8, %xmm9 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: pandn %xmm11, %xmm9 |
| ; SSE-NEXT: movdqa %xmm1, %xmm11 |
| ; SSE-NEXT: pand %xmm3, %xmm11 |
| ; SSE-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, %xmm14 |
| ; SSE-NEXT: pand %xmm3, %xmm14 |
| ; SSE-NEXT: movdqa 80(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, %xmm13 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm3, %xmm13 |
| ; SSE-NEXT: movdqa %xmm10, %xmm15 |
| ; SSE-NEXT: pand %xmm3, %xmm10 |
| ; SSE-NEXT: pand %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm3, %xmm12 |
| ; SSE-NEXT: pandn %xmm5, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm5 |
| ; SSE-NEXT: por %xmm9, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, %xmm9 |
| ; SSE-NEXT: pand %xmm7, %xmm9 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm9, %xmm9 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm9 |
| ; SSE-NEXT: por %xmm4, %xmm9 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm11 |
| ; SSE-NEXT: movdqa %xmm11, %xmm1 |
| ; SSE-NEXT: pand %xmm7, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm4, %xmm9 |
| ; SSE-NEXT: por %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pxor %xmm9, %xmm9 |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] |
| ; SSE-NEXT: psrld $16, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3] |
| ; SSE-NEXT: packuswb %xmm6, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,0,0,65535,65535] |
| ; SSE-NEXT: pand %xmm6, %xmm5 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: por %xmm5, %xmm6 |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: pand %xmm8, %xmm6 |
| ; SSE-NEXT: pandn %xmm1, %xmm8 |
| ; SSE-NEXT: por %xmm8, %xmm6 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,0,65535] |
| ; SSE-NEXT: pand %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm1, %xmm5 |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: movdqa %xmm4, %xmm11 |
| ; SSE-NEXT: pandn %xmm0, %xmm11 |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: por %xmm6, %xmm11 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm15 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: pand %xmm7, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: por %xmm2, %xmm14 |
| ; SSE-NEXT: movdqa %xmm14, %xmm1 |
| ; SSE-NEXT: pand %xmm7, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm2, %xmm5 |
| ; SSE-NEXT: pandn %xmm1, %xmm5 |
| ; SSE-NEXT: pand %xmm2, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm5 |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: por %xmm12, %xmm13 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: pand %xmm7, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm4, %xmm8 |
| ; SSE-NEXT: pandn %xmm0, %xmm8 |
| ; SSE-NEXT: pand %xmm4, %xmm5 |
| ; SSE-NEXT: por %xmm5, %xmm8 |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm9[8],xmm15[9],xmm9[9],xmm15[10],xmm9[10],xmm15[11],xmm9[11],xmm15[12],xmm9[12],xmm15[13],xmm9[13],xmm15[14],xmm9[14],xmm15[15],xmm9[15] |
| ; SSE-NEXT: movdqa %xmm15, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm5 |
| ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm9[8],xmm14[9],xmm9[9],xmm14[10],xmm9[10],xmm14[11],xmm9[11],xmm14[12],xmm9[12],xmm14[13],xmm9[13],xmm14[14],xmm9[14],xmm14[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,0,65535,65535,65535,65535] |
| ; SSE-NEXT: pand %xmm6, %xmm5 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: por %xmm5, %xmm6 |
| ; SSE-NEXT: pand %xmm2, %xmm1 |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: pandn %xmm6, %xmm2 |
| ; SSE-NEXT: por %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3],xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: pand %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm1, %xmm5 |
| ; SSE-NEXT: pand %xmm4, %xmm2 |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: por %xmm2, %xmm4 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pand %xmm7, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm7, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pand %xmm2, %xmm5 |
| ; SSE-NEXT: por %xmm1, %xmm5 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm12, %xmm1 |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0] |
| ; SSE-NEXT: pand %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm1, %xmm12 |
| ; SSE-NEXT: pand %xmm12, %xmm7 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm0, %xmm7 |
| ; SSE-NEXT: pandn %xmm1, %xmm7 |
| ; SSE-NEXT: por %xmm5, %xmm7 |
| ; SSE-NEXT: movdqa %xmm10, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7] |
| ; SSE-NEXT: movdqa %xmm10, %xmm5 |
| ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm1[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm1[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm1 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm3, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,65535] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm6, %xmm3 |
| ; SSE-NEXT: pandn %xmm5, %xmm6 |
| ; SSE-NEXT: por %xmm3, %xmm6 |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: pand %xmm2, %xmm6 |
| ; SSE-NEXT: pandn %xmm1, %xmm2 |
| ; SSE-NEXT: por %xmm2, %xmm6 |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,0,0] |
| ; SSE-NEXT: pand %xmm2, %xmm1 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: pandn %xmm3, %xmm2 |
| ; SSE-NEXT: por %xmm1, %xmm2 |
| ; SSE-NEXT: pand %xmm0, %xmm6 |
| ; SSE-NEXT: packuswb %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rsi) |
| ; SSE-NEXT: movdqa %xmm11, (%rdx) |
| ; SSE-NEXT: movdqa %xmm8, (%rcx) |
| ; SSE-NEXT: movdqa %xmm4, (%r8) |
| ; SSE-NEXT: movdqa %xmm7, (%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa %xmm0, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride6_vf16: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm2[2,8,14,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,6,12],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0,1,2],xmm4[3,4,5],xmm5[6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[4,10] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm9, %xmm6, %xmm7, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm2[3,9,15,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[1,7,13],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm8, %xmm10, %xmm8 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4,5],xmm8[6,7] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[5,11] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm8, %xmm10, %xmm8 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm9, %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,0,6,12,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm2[4,10,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm11 = <0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm11, %xmm8, %xmm10, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm4[0,6,12] |
| ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm12, %xmm10 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm9, %xmm8, %xmm10, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm3[3,9,15,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,1,7,13,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm12[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm2[5,11,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm12, %xmm13, %xmm12 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm11, %xmm10, %xmm12, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm4[1,7,13] |
| ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm12, %xmm11 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm9, %xmm10, %xmm11, %xmm9 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[4,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,xmm2[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = <255,255,255,255,255,0,0,0,0,0,u,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm12, %xmm10, %xmm11, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm4[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[2,8,14] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm5[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm13, %xmm11 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm11[5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm12, %xmm1, %xmm0, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm4[3,9,15] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm6, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm8, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, (%r8) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm10, (%r9) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rax) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride6_vf16: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm0 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm0, %ymm3, %ymm4, %ymm5 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm5, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 80(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10] |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; AVX2-ONLY-NEXT: vpblendvb %xmm8, %xmm2, %xmm7, %xmm2 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX2-ONLY-NEXT: vpblendvb %xmm8, %xmm5, %xmm6, %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm6 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm6 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm6, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[2,8,14,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[2,8,14],zero,zero,xmm6[0,6,12],zero,zero,zero,xmm6[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm9, %xmm10, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12] |
| ; AVX2-ONLY-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX2-ONLY-NEXT: vpblendvb %xmm8, %xmm9, %xmm10, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[3,9,15,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[3,9,15],zero,zero,xmm6[1,7,13],zero,zero,zero,xmm6[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm6, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13] |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm10, %xmm7 |
| ; AVX2-ONLY-NEXT: vpblendvb %xmm8, %xmm6, %xmm7, %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm7 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm3, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[4,10],zero,zero,zero,xmm3[2,8,14],zero,zero,xmm3[u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm8, %xmm10, %xmm8 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,zero,xmm3[3,9,15],zero,zero,xmm3[u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm3, %xmm3 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm2, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm9, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm6, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm7, (%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%rax) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i8_stride6_vf16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535] |
| ; AVX512F-NEXT: vmovdqa (%rdi), %ymm3 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm4 |
| ; AVX512F-NEXT: vmovdqa %ymm0, %ymm5 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm4, %ymm3, %ymm5 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u] |
| ; AVX512F-NEXT: vextracti128 $1, %ymm5, %xmm6 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm1, %xmm2, %xmm7 |
| ; AVX512F-NEXT: vmovdqa 80(%rdi), %xmm2 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10] |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX512F-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $184, %xmm7, %xmm9, %xmm8 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX512F-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX512F-NEXT: vpternlogq $184, %xmm5, %xmm9, %xmm6 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12] |
| ; AVX512F-NEXT: vpor %xmm5, %xmm7, %xmm5 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535] |
| ; AVX512F-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm7 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm10 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm11, %xmm12, %xmm11 |
| ; AVX512F-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm11 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13] |
| ; AVX512F-NEXT: vpor %xmm5, %xmm12, %xmm5 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm7, %xmm10, %xmm7 |
| ; AVX512F-NEXT: vpternlogq $226, %xmm5, %xmm9, %xmm7 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm3, %ymm4, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm3 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7] |
| ; AVX512F-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX512F-NEXT: vpor %xmm2, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vmovdqa %xmm8, (%rsi) |
| ; AVX512F-NEXT: vmovdqa %xmm6, (%rdx) |
| ; AVX512F-NEXT: vmovdqa %xmm11, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %xmm7, (%r8) |
| ; AVX512F-NEXT: vmovdqa %xmm4, (%r9) |
| ; AVX512F-NEXT: vmovdqa %xmm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i8_stride6_vf16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm1 |
| ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924 |
| ; AVX512BW-NEXT: kmovd %r10d, %k1 |
| ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u] |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm4 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm3, %xmm5, %xmm3 |
| ; AVX512BW-NEXT: vmovdqa 80(%rdi), %xmm5 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10] |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm7 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX512BW-NEXT: vpor %xmm6, %xmm8, %xmm6 |
| ; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800 |
| ; AVX512BW-NEXT: kmovd %edi, %k2 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX512BW-NEXT: vpor %xmm4, %xmm6, %xmm4 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2} |
| ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492 |
| ; AVX512BW-NEXT: kmovd %edi, %k3 |
| ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm4, %xmm6 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12] |
| ; AVX512BW-NEXT: vpor %xmm9, %xmm10, %xmm9 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm6, %xmm4, %xmm4 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13] |
| ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6 |
| ; AVX512BW-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2} |
| ; AVX512BW-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm0 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14] |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7] |
| ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15] |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX512BW-NEXT: vmovdqa %xmm3, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa %xmm8, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa %xmm4, (%r8) |
| ; AVX512BW-NEXT: vmovdqa %xmm6, (%r9) |
| ; AVX512BW-NEXT: vmovdqa %xmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <96 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90> |
| %strided.vec1 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91> |
| %strided.vec2 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92> |
| %strided.vec3 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93> |
| %strided.vec4 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94> |
| %strided.vec5 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95> |
| store <16 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <16 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <16 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <16 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <16 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <16 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| |
| define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf32: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $280, %rsp # imm = 0x118 |
| ; SSE-NEXT: movdqa 64(%rdi), %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 80(%rdi), %xmm6 |
| ; SSE-NEXT: movdqa (%rdi), %xmm15 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm13 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm7 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm10, %xmm1 |
| ; SSE-NEXT: pandn %xmm7, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm7, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm7, %xmm14 |
| ; SSE-NEXT: pand %xmm5, %xmm14 |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm10, %xmm1 |
| ; SSE-NEXT: pandn %xmm13, %xmm1 |
| ; SSE-NEXT: movdqa %xmm15, %xmm12 |
| ; SSE-NEXT: pand %xmm10, %xmm12 |
| ; SSE-NEXT: por %xmm1, %xmm12 |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: pand %xmm11, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm9, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm6, %xmm0 |
| ; SSE-NEXT: pand %xmm5, %xmm8 |
| ; SSE-NEXT: por %xmm0, %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; SSE-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: pand %xmm3, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 128(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa 144(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa %xmm10, %xmm2 |
| ; SSE-NEXT: pandn %xmm3, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm2 |
| ; SSE-NEXT: pandn %xmm3, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm5, %xmm3 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm0 |
| ; SSE-NEXT: pandn %xmm0, %xmm9 |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm15, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 112(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa %xmm10, %xmm7 |
| ; SSE-NEXT: pandn %xmm0, %xmm7 |
| ; SSE-NEXT: movdqa 160(%rdi), %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm5, %xmm4 |
| ; SSE-NEXT: movdqa %xmm13, %xmm2 |
| ; SSE-NEXT: pandn %xmm13, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm1, %xmm15 |
| ; SSE-NEXT: movdqa %xmm10, %xmm5 |
| ; SSE-NEXT: pandn %xmm6, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, %xmm5 |
| ; SSE-NEXT: movdqa 96(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 176(%rdi), %xmm13 |
| ; SSE-NEXT: movdqa %xmm13, %xmm10 |
| ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm1, %xmm10 |
| ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm10, %xmm6 |
| ; SSE-NEXT: pand %xmm1, %xmm10 |
| ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm2, %xmm10 |
| ; SSE-NEXT: pand %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm1, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE-NEXT: por %xmm7, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, %xmm5 |
| ; SSE-NEXT: pand %xmm11, %xmm5 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 |
| ; SSE-NEXT: por %xmm9, %xmm5 |
| ; SSE-NEXT: pandn %xmm13, %xmm2 |
| ; SSE-NEXT: por %xmm2, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, %xmm2 |
| ; SSE-NEXT: pand %xmm11, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm2, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm5 |
| ; SSE-NEXT: por %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pxor %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa %xmm14, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3],xmm14[4],xmm2[4],xmm14[5],xmm2[5],xmm14[6],xmm2[6],xmm14[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm14[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] |
| ; SSE-NEXT: psrld $16, %xmm5 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm5[2],xmm14[3],xmm5[3] |
| ; SSE-NEXT: packuswb %xmm14, %xmm7 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm13, %xmm5 |
| ; SSE-NEXT: pandn %xmm7, %xmm5 |
| ; SSE-NEXT: movdqa %xmm12, %xmm7 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,0,65535,0,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm14, %xmm1 |
| ; SSE-NEXT: pandn %xmm7, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3],xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm12[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm7 |
| ; SSE-NEXT: por %xmm1, %xmm7 |
| ; SSE-NEXT: packuswb %xmm7, %xmm7 |
| ; SSE-NEXT: pand %xmm13, %xmm7 |
| ; SSE-NEXT: por %xmm5, %xmm7 |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,0,65535] |
| ; SSE-NEXT: movdqa %xmm5, %xmm12 |
| ; SSE-NEXT: pandn %xmm1, %xmm12 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm2[8],xmm8[9],xmm2[9],xmm8[10],xmm2[10],xmm8[11],xmm2[11],xmm8[12],xmm2[12],xmm8[13],xmm2[13],xmm8[14],xmm2[14],xmm8[15],xmm2[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm5, %xmm1 |
| ; SSE-NEXT: por %xmm12, %xmm1 |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm9, %xmm8 |
| ; SSE-NEXT: pandn %xmm1, %xmm8 |
| ; SSE-NEXT: pand %xmm9, %xmm7 |
| ; SSE-NEXT: por %xmm7, %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3] |
| ; SSE-NEXT: psrld $16, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3] |
| ; SSE-NEXT: packuswb %xmm3, %xmm7 |
| ; SSE-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm14 |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: packuswb %xmm14, %xmm14 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: pand %xmm13, %xmm14 |
| ; SSE-NEXT: pandn %xmm7, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm1, %xmm5 |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm14 |
| ; SSE-NEXT: por %xmm14, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm15, %xmm1 |
| ; SSE-NEXT: pand %xmm11, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm13, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm13, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm12 |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm4, %xmm10 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm7, %xmm1 |
| ; SSE-NEXT: pand %xmm11, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm13, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm13, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm14 |
| ; SSE-NEXT: pandn %xmm0, %xmm14 |
| ; SSE-NEXT: pand %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm14 |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm6, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm3 |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm6[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,0,65535,65535,65535,65535] |
| ; SSE-NEXT: movdqa %xmm3, %xmm4 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm2[8],xmm15[9],xmm2[9],xmm15[10],xmm2[10],xmm15[11],xmm2[11],xmm15[12],xmm2[12],xmm15[13],xmm2[13],xmm15[14],xmm2[14],xmm15[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm0 |
| ; SSE-NEXT: por %xmm4, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm13, %xmm4 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: pand %xmm13, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm4 |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3],xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm0 |
| ; SSE-NEXT: por %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm12 |
| ; SSE-NEXT: pandn %xmm0, %xmm12 |
| ; SSE-NEXT: pand %xmm9, %xmm4 |
| ; SSE-NEXT: por %xmm4, %xmm12 |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm2[8],xmm10[9],xmm2[9],xmm10[10],xmm2[10],xmm10[11],xmm2[11],xmm10[12],xmm2[12],xmm10[13],xmm2[13],xmm10[14],xmm2[14],xmm10[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm10, %xmm4 |
| ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm6 |
| ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm4[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm10[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm3 |
| ; SSE-NEXT: por %xmm6, %xmm3 |
| ; SSE-NEXT: pand %xmm13, %xmm4 |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: pandn %xmm3, %xmm13 |
| ; SSE-NEXT: por %xmm4, %xmm13 |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm3 |
| ; SSE-NEXT: pandn %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm3, %xmm5 |
| ; SSE-NEXT: pand %xmm9, %xmm13 |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm0, %xmm9 |
| ; SSE-NEXT: por %xmm13, %xmm9 |
| ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm3, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm3, %xmm4 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pand %xmm3, %xmm5 |
| ; SSE-NEXT: por %xmm4, %xmm5 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: pand %xmm13, %xmm15 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0] |
| ; SSE-NEXT: movdqa %xmm0, %xmm6 |
| ; SSE-NEXT: pandn %xmm4, %xmm6 |
| ; SSE-NEXT: pand %xmm0, %xmm5 |
| ; SSE-NEXT: por %xmm5, %xmm6 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm11, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm5, %xmm4 |
| ; SSE-NEXT: movdqa %xmm3, %xmm5 |
| ; SSE-NEXT: pandn %xmm4, %xmm5 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE-NEXT: pand %xmm11, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm4[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm7, %xmm7 |
| ; SSE-NEXT: pand %xmm3, %xmm7 |
| ; SSE-NEXT: por %xmm5, %xmm7 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm13, %xmm4 |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload |
| ; SSE-NEXT: por %xmm4, %xmm13 |
| ; SSE-NEXT: pand %xmm13, %xmm11 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm5 |
| ; SSE-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE-NEXT: pandn %xmm5, %xmm4 |
| ; SSE-NEXT: pand %xmm0, %xmm7 |
| ; SSE-NEXT: por %xmm7, %xmm4 |
| ; SSE-NEXT: movdqa %xmm8, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm8, %xmm7 |
| ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm5[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm5[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm5 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm7, %xmm5 |
| ; SSE-NEXT: movdqa %xmm3, %xmm7 |
| ; SSE-NEXT: pandn %xmm5, %xmm7 |
| ; SSE-NEXT: movdqa %xmm10, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm5[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535] |
| ; SSE-NEXT: movdqa %xmm5, %xmm9 |
| ; SSE-NEXT: pandn %xmm8, %xmm9 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3],xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm8[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm5, %xmm10 |
| ; SSE-NEXT: por %xmm9, %xmm10 |
| ; SSE-NEXT: packuswb %xmm10, %xmm10 |
| ; SSE-NEXT: pand %xmm3, %xmm10 |
| ; SSE-NEXT: por %xmm7, %xmm10 |
| ; SSE-NEXT: movdqa %xmm15, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm2[8],xmm15[9],xmm2[9],xmm15[10],xmm2[10],xmm15[11],xmm2[11],xmm15[12],xmm2[12],xmm15[13],xmm2[13],xmm15[14],xmm2[14],xmm15[15],xmm2[15] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,0,0] |
| ; SSE-NEXT: movdqa %xmm7, %xmm11 |
| ; SSE-NEXT: pandn %xmm9, %xmm11 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3],xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm7, %xmm8 |
| ; SSE-NEXT: por %xmm8, %xmm11 |
| ; SSE-NEXT: packuswb %xmm11, %xmm9 |
| ; SSE-NEXT: movdqa %xmm0, %xmm8 |
| ; SSE-NEXT: pandn %xmm9, %xmm8 |
| ; SSE-NEXT: pand %xmm0, %xmm10 |
| ; SSE-NEXT: por %xmm10, %xmm8 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm11, %xmm9 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm2[8],xmm9[9],xmm2[9],xmm9[10],xmm2[10],xmm9[11],xmm2[11],xmm9[12],xmm2[12],xmm9[13],xmm2[13],xmm9[14],xmm2[14],xmm9[15],xmm2[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm11, %xmm10 |
| ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm9[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm9[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm9 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm10, %xmm9 |
| ; SSE-NEXT: movdqa %xmm1, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm2[8],xmm10[9],xmm2[9],xmm10[10],xmm2[10],xmm10[11],xmm2[11],xmm10[12],xmm2[12],xmm10[13],xmm2[13],xmm10[14],xmm2[14],xmm10[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm5, %xmm11 |
| ; SSE-NEXT: pandn %xmm10, %xmm5 |
| ; SSE-NEXT: por %xmm11, %xmm5 |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: pand %xmm3, %xmm5 |
| ; SSE-NEXT: pandn %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm5 |
| ; SSE-NEXT: movdqa %xmm13, %xmm3 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm2[8],xmm13[9],xmm2[9],xmm13[10],xmm2[10],xmm13[11],xmm2[11],xmm13[12],xmm2[12],xmm13[13],xmm2[13],xmm13[14],xmm2[14],xmm13[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm7, %xmm2 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: pandn %xmm3, %xmm7 |
| ; SSE-NEXT: por %xmm2, %xmm7 |
| ; SSE-NEXT: pand %xmm0, %xmm5 |
| ; SSE-NEXT: packuswb %xmm7, %xmm2 |
| ; SSE-NEXT: pandn %xmm2, %xmm0 |
| ; SSE-NEXT: por %xmm5, %xmm0 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rdx) |
| ; SSE-NEXT: movdqa %xmm14, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%r8) |
| ; SSE-NEXT: movdqa %xmm12, (%r8) |
| ; SSE-NEXT: movdqa %xmm4, 16(%r9) |
| ; SSE-NEXT: movdqa %xmm6, (%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa %xmm0, 16(%rax) |
| ; SSE-NEXT: movdqa %xmm8, (%rax) |
| ; SSE-NEXT: addq $280, %rsp # imm = 0x118 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride6_vf32: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $168, %rsp |
| ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm4[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[4,10] |
| ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,255,255,255,255,255,0,0,0,0,0> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[5,11] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm1 = [0,0,6,12,0,0,6,12,0,0,6,12,0,0,6,12] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm15, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm8[4,10,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[2,8,14],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm10, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = <0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm2, %xmm3, %xmm10, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [3,9,15,0,3,9,15,0,3,9,15,0,3,9,15,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm14, %xmm10 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm11 = [0,1,7,13,0,1,7,13,0,1,7,13,0,1,7,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm15, %xmm12 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm12[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm8[5,11,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm9[3,9,15],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm12, %xmm13, %xmm12 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm2, %xmm10, %xmm12, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[2,8,14,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm4, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm6[0,6,12] |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,255,255,255,255,255,255,0,0,0,0,0> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm4, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm6[1,7,13] |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm3, %xmm1 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm8[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm14[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <255,255,255,255,255,0,0,0,0,0,u,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm8[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm10 = [128,128,128,2,8,14,0,0,128,128,128,2,8,14,0,0] |
| ; AVX1-ONLY-NEXT: # xmm10 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm3, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [0,6,12,128,128,128,0,0,0,6,12,128,128,128,0,0] |
| ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm13, %xmm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm15[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm13[0],xmm5[0],xmm13[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm8, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm9, %xmm6 |
| ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm10, %xmm6 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3,4,5],xmm6[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255] |
| ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm6, %ymm4 |
| ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm5, %ymm5 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm5, %ymm4 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0] |
| ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm10, %ymm5, %ymm10 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm10, %ymm4 |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, (%rsp) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [128,128,128,3,9,15,0,0,128,128,128,3,9,15,0,0] |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm13 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm4 = [1,7,13,128,128,128,0,0,1,7,13,128,128,128,0,0] |
| ; AVX1-ONLY-NEXT: # xmm4 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm0, %xmm12 |
| ; AVX1-ONLY-NEXT: vpor %xmm13, %xmm12, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm13, %xmm11, %xmm11 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm11, %ymm11 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm15[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm9, %xmm4 |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm12[3,4,5],xmm4[6,7] |
| ; AVX1-ONLY-NEXT: vandnps %ymm11, %ymm6, %ymm7 |
| ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm6, %ymm5, %ymm5 |
| ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm4, %ymm4 |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm3[4,10,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12] |
| ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX1-ONLY-NEXT: vandnps %ymm5, %ymm6, %ymm5 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm7, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm10, %ymm7 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm8, %ymm10, %ymm8 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm8, %ymm13 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm3[5,11,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13] |
| ; AVX1-ONLY-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7 |
| ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm6, %ymm7 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm6, %ymm6 |
| ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm10, %ymm6 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm10, %ymm7 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm6, %ymm6 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[4,10],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm3[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm8 = [0,0,128,128,128,2,8,14,0,0,128,128,128,2,8,14] |
| ; AVX1-ONLY-NEXT: # xmm8 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm2, %xmm9 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [0,0,0,6,12,128,128,128,0,0,0,6,12,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm11 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm1, %xmm12 |
| ; AVX1-ONLY-NEXT: vpor %xmm9, %xmm12, %xmm9 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm9, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm9 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535] |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm12 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm9, %ymm7 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm12, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm5, %xmm8 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm4, %xmm11 |
| ; AVX1-ONLY-NEXT: vpor %xmm8, %xmm11, %xmm8 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm15[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm11[0,1,2,3,4],xmm8[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm10, %ymm7 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX1-ONLY-NEXT: vandnps %ymm8, %ymm10, %ymm8 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm8, %ymm7 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm3, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,128,128,128,3,9,15,0,0,128,128,128,3,9,15] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm8 = [0,0,1,7,13,128,128,128,0,0,1,7,13,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm8 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm9, %ymm0 |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm15[u,u,u,u,u,u,u,u,5,11,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm14[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm10, %ymm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm10, %ymm1 |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm13, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm6, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX1-ONLY-NEXT: addq $168, %rsp |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride6_vf32: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm8 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm1, %ymm2, %ymm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm9, %xmm10 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm11 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm7 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0> |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[0,1],ymm3[0,1] |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm3[2,3] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm3 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm11, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[3,9,15],zero,zero,xmm10[1,7,13,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm9, %xmm10, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm9, %ymm3, %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm11 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm11, %ymm2, %ymm1, %ymm9 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm9, %xmm10 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm9[2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero,xmm9[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm12, %xmm13, %xmm12 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm13 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm6, %ymm5, %ymm13 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm12, %ymm14, %ymm12 |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm14, %ymm4, %ymm8 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero,xmm9[u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm10, %xmm9, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,0,6,12],zero,zero,zero,xmm8[4,10],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm9, %ymm13, %ymm13 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm8, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12] |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm10, %xmm7 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm12, %ymm7, %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,1,7,13],zero,zero,zero,xmm8[5,11],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13] |
| ; AVX2-ONLY-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm12, %ymm4, %ymm14, %ymm9 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm13, %ymm8, %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm11, %ymm4, %ymm14, %ymm4 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm4, %xmm11 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm13, %xmm14, %xmm13 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm12, %ymm2, %ymm1, %ymm1 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm1, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm12 = <0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm12, %ymm6, %ymm5, %ymm5 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm6, %xmm12, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm12[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm12 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm6, %ymm12, %ymm6 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm11, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpor %xmm2, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm9, %xmm2 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm1, %ymm4, %ymm1 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u],zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,xmm2[1,7,13],zero,zero,zero,xmm2[5,11] |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero |
| ; AVX2-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm6, (%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%rax) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i8_stride6_vf32: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535] |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %ymm17 |
| ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3 |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %ymm1 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %ymm6 |
| ; AVX512F-NEXT: vmovdqa %ymm0, %ymm7 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm3, %ymm17, %ymm7 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm8 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3] |
| ; AVX512F-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0] |
| ; AVX512F-NEXT: vmovdqa %ymm9, %ymm10 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm5, %ymm1, %ymm10 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpternlogq $248, %ymm16, %ymm4, %ymm11 |
| ; AVX512F-NEXT: vmovdqa 160(%rdi), %ymm13 |
| ; AVX512F-NEXT: vmovdqa %ymm0, %ymm14 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm14 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm14, %xmm15 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero |
| ; AVX512F-NEXT: vpor %xmm4, %xmm12, %xmm4 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 %ymm2, %ymm18 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpternlogq $248, %ymm16, %ymm7, %ymm8 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero |
| ; AVX512F-NEXT: vpor %xmm7, %xmm10, %xmm7 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535] |
| ; AVX512F-NEXT: vmovdqa %ymm8, %ymm10 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm10 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm10, %xmm11 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm12, %xmm14, %xmm12 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm1, %ymm5, %ymm9 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpternlogq $248, %ymm16, %ymm12, %ymm14 |
| ; AVX512F-NEXT: vmovdqa %ymm0, %ymm12 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm12 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero |
| ; AVX512F-NEXT: vextracti128 $1, %ymm12, %xmm2 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12] |
| ; AVX512F-NEXT: vpor %xmm4, %xmm15, %xmm4 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $184, %ymm14, %ymm15, %ymm4 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm11, %xmm10, %xmm10 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpternlogq $248, %ymm16, %ymm10, %ymm9 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13] |
| ; AVX512F-NEXT: vpor %xmm2, %xmm10, %xmm2 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $184, %ymm9, %ymm15, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm8 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm8, %xmm6 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero |
| ; AVX512F-NEXT: vpor %xmm9, %xmm10, %xmm9 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm17, %ymm3, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm3 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512F-NEXT: vpternlogq $226, %ymm9, %ymm15, %ymm5 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero |
| ; AVX512F-NEXT: vpor %xmm6, %xmm8, %xmm6 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm3, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-NEXT: vpternlogq $226, %ymm6, %ymm15, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 %ymm18, (%rsi) |
| ; AVX512F-NEXT: vmovdqa %ymm7, (%rdx) |
| ; AVX512F-NEXT: vmovdqa %ymm4, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm2, (%r8) |
| ; AVX512F-NEXT: vmovdqa %ymm5, (%r9) |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i8_stride6_vf32: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm4 |
| ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm0 |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %ymm3 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm2 |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3] |
| ; AVX512BW-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm8 |
| ; AVX512BW-NEXT: movw $-28124, %r10w # imm = 0x9224 |
| ; AVX512BW-NEXT: kmovd %r10d, %k2 |
| ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm8, %ymm6 {%k2} |
| ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924 |
| ; AVX512BW-NEXT: kmovd %r10d, %k1 |
| ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm4, %ymm7 {%k1} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm9 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm3, %xmm5, %xmm5 |
| ; AVX512BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800 |
| ; AVX512BW-NEXT: kmovd %r10d, %k3 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm3 |
| ; AVX512BW-NEXT: vpblendmw %ymm2, %ymm3, %ymm10 {%k1} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm10, %xmm11 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero |
| ; AVX512BW-NEXT: vpor %xmm12, %xmm13, %xmm12 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm7, %xmm9, %xmm7 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero |
| ; AVX512BW-NEXT: vpor %xmm6, %xmm9, %xmm6 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512BW-NEXT: vpblendmw %ymm8, %ymm1, %ymm9 {%k2} |
| ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492 |
| ; AVX512BW-NEXT: kmovd %edi, %k3 |
| ; AVX512BW-NEXT: vpblendmw %ymm4, %ymm0, %ymm10 {%k3} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm10, %xmm11 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm7, %xmm12, %xmm7 |
| ; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800 |
| ; AVX512BW-NEXT: kmovd %edi, %k4 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpblendmw %ymm3, %ymm2, %ymm12 {%k1} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm12, %xmm14 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12] |
| ; AVX512BW-NEXT: vpor %xmm13, %xmm15, %xmm13 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13 |
| ; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000 |
| ; AVX512BW-NEXT: kmovd %edi, %k2 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm13, %ymm7 {%k2} |
| ; AVX512BW-NEXT: movw $9289, %di # imm = 0x2449 |
| ; AVX512BW-NEXT: kmovd %edi, %k5 |
| ; AVX512BW-NEXT: vmovdqu16 %ymm8, %ymm1 {%k5} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm8, %xmm10, %xmm8 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13] |
| ; AVX512BW-NEXT: vpor %xmm9, %xmm10, %xmm9 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm9, %ymm8 {%k2} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vmovdqu16 %ymm4, %ymm0 {%k1} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm4 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k3} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm2 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero |
| ; AVX512BW-NEXT: vpor %xmm10, %xmm11, %xmm10 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm10, %ymm9 {%k2} |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpor %xmm4, %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero |
| ; AVX512BW-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2} |
| ; AVX512BW-NEXT: vmovdqa %ymm5, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa %ymm6, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa %ymm7, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa %ymm8, (%r8) |
| ; AVX512BW-NEXT: vmovdqa %ymm9, (%r9) |
| ; AVX512BW-NEXT: vmovdqa %ymm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
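| ; The IR below deinterleaves a stride-6 layout: strided.vec k gathers input bytes k, k+6, k+12, ... from the single wide <192 x i8> load. |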
| %wide.vec = load <192 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186> |
| %strided.vec1 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187> |
| %strided.vec2 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188> |
| %strided.vec3 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189> |
| %strided.vec4 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190> |
| %strided.vec5 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191> |
| store <32 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <32 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <32 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <32 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <32 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <32 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| |
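| ; Same stride-6 deinterleave pattern, widened to 64 elements per output vector. |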
| define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind { |
| ; SSE-LABEL: load_i8_stride6_vf64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $824, %rsp # imm = 0x338 |
| ; SSE-NEXT: movdqa 256(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 272(%rdi), %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 192(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 208(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 240(%rdi), %xmm14 |
| ; SSE-NEXT: movdqa 224(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm14, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm1 |
| ; SSE-NEXT: pandn %xmm14, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm14 |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm5, %xmm2 |
| ; SSE-NEXT: por %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: pand %xmm11, %xmm1 |
| ; SSE-NEXT: movdqa %xmm11, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pandn %xmm4, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm3 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; SSE-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: pand %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm3, %xmm8 |
| ; SSE-NEXT: por %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 320(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa 336(%rdi), %xmm9 |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm1 |
| ; SSE-NEXT: pandn %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm9 |
| ; SSE-NEXT: por %xmm0, %xmm9 |
| ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa %xmm11, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa 304(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa 288(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm5, %xmm3 |
| ; SSE-NEXT: movdqa %xmm5, %xmm10 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: por %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa 368(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm2 |
| ; SSE-NEXT: pandn %xmm3, %xmm2 |
| ; SSE-NEXT: movdqa 352(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm3 |
| ; SSE-NEXT: por %xmm2, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE-NEXT: pand %xmm15, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE-NEXT: pandn %xmm2, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pandn %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm12 |
| ; SSE-NEXT: movdqa %xmm5, %xmm2 |
| ; SSE-NEXT: pandn %xmm12, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm2 |
| ; SSE-NEXT: pandn %xmm12, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm12 |
| ; SSE-NEXT: por %xmm0, %xmm12 |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa %xmm11, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm3, %xmm0 |
| ; SSE-NEXT: movdqa (%rdi), %xmm9 |
| ; SSE-NEXT: movdqa %xmm9, %xmm4 |
| ; SSE-NEXT: pand %xmm5, %xmm4 |
| ; SSE-NEXT: por %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: pand %xmm11, %xmm0 |
| ; SSE-NEXT: por %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa 80(%rdi), %xmm14 |
| ; SSE-NEXT: movdqa %xmm6, %xmm2 |
| ; SSE-NEXT: pandn %xmm14, %xmm2 |
| ; SSE-NEXT: movdqa 64(%rdi), %xmm13 |
| ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm13 |
| ; SSE-NEXT: por %xmm2, %xmm13 |
| ; SSE-NEXT: movdqa %xmm13, %xmm2 |
| ; SSE-NEXT: pand %xmm15, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa %xmm8, %xmm4 |
| ; SSE-NEXT: pandn %xmm2, %xmm4 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 128(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: pandn %xmm2, %xmm0 |
| ; SSE-NEXT: movdqa 144(%rdi), %xmm7 |
| ; SSE-NEXT: movdqa %xmm5, %xmm4 |
| ; SSE-NEXT: pandn %xmm7, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm4 |
| ; SSE-NEXT: pandn %xmm7, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm7 |
| ; SSE-NEXT: por %xmm0, %xmm7 |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5] |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: pandn %xmm0, %xmm11 |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm6, %xmm4 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm3, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm6, %xmm2 |
| ; SSE-NEXT: pandn %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE-NEXT: movdqa %xmm6, %xmm1 |
| ; SSE-NEXT: pandn %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm5 |
| ; SSE-NEXT: movdqa 112(%rdi), %xmm6 |
| ; SSE-NEXT: movdqa %xmm10, %xmm9 |
| ; SSE-NEXT: pandn %xmm6, %xmm10 |
| ; SSE-NEXT: movdqa 160(%rdi), %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm8 |
| ; SSE-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm4, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm4, %xmm3 |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: pandn %xmm4, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm3 |
| ; SSE-NEXT: pandn %xmm14, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, %xmm3 |
| ; SSE-NEXT: pandn %xmm6, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 96(%rdi), %xmm5 |
| ; SSE-NEXT: movdqa %xmm5, %xmm3 |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 176(%rdi), %xmm14 |
| ; SSE-NEXT: movdqa %xmm14, %xmm3 |
| ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm4 |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm0, %xmm6 |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pandn %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm2 |
| ; SSE-NEXT: pand %xmm9, %xmm2 |
| ; SSE-NEXT: por %xmm10, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, %xmm5 |
| ; SSE-NEXT: pand %xmm15, %xmm5 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: packuswb %xmm5, %xmm5 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,0,0,65535,65535] |
| ; SSE-NEXT: pand %xmm4, %xmm5 |
| ; SSE-NEXT: por %xmm11, %xmm5 |
| ; SSE-NEXT: pandn %xmm14, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm8 |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm5 |
| ; SSE-NEXT: por %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pxor %xmm5, %xmm5 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3] |
| ; SSE-NEXT: psrld $16, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm0[2],xmm14[3],xmm0[3] |
| ; SSE-NEXT: packuswb %xmm14, %xmm6 |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pandn %xmm6, %xmm1 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,0,65535,0,0,65535,65535] |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: pandn %xmm6, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm6 |
| ; SSE-NEXT: por %xmm0, %xmm6 |
| ; SSE-NEXT: packuswb %xmm6, %xmm6 |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: movdqa %xmm4, %xmm10 |
| ; SSE-NEXT: por %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,0,65535] |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: pandn %xmm3, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm1, %xmm3 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: packuswb %xmm3, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm3 |
| ; SSE-NEXT: pandn %xmm0, %xmm3 |
| ; SSE-NEXT: pand %xmm9, %xmm6 |
| ; SSE-NEXT: por %xmm6, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] |
| ; SSE-NEXT: psrld $16, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3] |
| ; SSE-NEXT: packuswb %xmm6, %xmm3 |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pandn %xmm3, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm3 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: movdqa %xmm14, %xmm6 |
| ; SSE-NEXT: pandn %xmm3, %xmm6 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm3 |
| ; SSE-NEXT: por %xmm6, %xmm3 |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: pand %xmm10, %xmm3 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm1, %xmm0 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm6 |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] |
| ; SSE-NEXT: psrld $16, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm12[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3] |
| ; SSE-NEXT: packuswb %xmm6, %xmm3 |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pandn %xmm3, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm3 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: movdqa %xmm14, %xmm6 |
| ; SSE-NEXT: pandn %xmm3, %xmm6 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm3 |
| ; SSE-NEXT: por %xmm6, %xmm3 |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: pand %xmm10, %xmm3 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm1, %xmm0 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm6 |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,2,3,3] |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] |
| ; SSE-NEXT: psrld $16, %xmm0 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3] |
| ; SSE-NEXT: packuswb %xmm4, %xmm3 |
| ; SSE-NEXT: movdqa %xmm2, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,0,4,5,6,7] |
| ; SSE-NEXT: pand %xmm14, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm14 |
| ; SSE-NEXT: por %xmm2, %xmm14 |
| ; SSE-NEXT: packuswb %xmm14, %xmm14 |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pand %xmm10, %xmm14 |
| ; SSE-NEXT: pandn %xmm3, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,7,6,4] |
| ; SSE-NEXT: pand %xmm1, %xmm2 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm14 |
| ; SSE-NEXT: por %xmm14, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm14, %xmm0 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm6, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm9, %xmm2 |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm9, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm14, %xmm0 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm11 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm2, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm14, %xmm0 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm7, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm13 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm2, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm14, %xmm0 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] |
| ; SSE-NEXT: packuswb %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movdqa %xmm8, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm0 |
| ; SSE-NEXT: por %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload |
| ; SSE-NEXT: por %xmm0, %xmm14 |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: pand %xmm15, %xmm0 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm2, %xmm3 |
| ; SSE-NEXT: por %xmm3, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: pxor %xmm10, %xmm10 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15] |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm3 |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm6, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,0,65535,65535,65535,65535] |
| ; SSE-NEXT: movdqa %xmm3, %xmm4 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm0 |
| ; SSE-NEXT: por %xmm4, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm8, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm8, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: movdqa %xmm5, %xmm4 |
| ; SSE-NEXT: pandn %xmm0, %xmm4 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm0 |
| ; SSE-NEXT: por %xmm4, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm2, %xmm6 |
| ; SSE-NEXT: movdqa %xmm2, %xmm4 |
| ; SSE-NEXT: por %xmm6, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15] |
| ; SSE-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm6 |
| ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm0 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm8, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm8, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm0 |
| ; SSE-NEXT: por %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: por %xmm6, %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15] |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] |
| ; SSE-NEXT: movaps %xmm0, %xmm6 |
| ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm1[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm9[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm0 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movdqa %xmm8, %xmm6 |
| ; SSE-NEXT: pandn %xmm0, %xmm6 |
| ; SSE-NEXT: pand %xmm8, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm0, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm0 |
| ; SSE-NEXT: por %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: por %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15] |
| ; SSE-NEXT: movdqa %xmm0, %xmm6 |
| ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm1[3,0] |
| ; SSE-NEXT: movaps %xmm1, %xmm7 |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[0,2] |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm10[8],xmm12[9],xmm10[9],xmm12[10],xmm10[10],xmm12[11],xmm10[11],xmm12[12],xmm10[12],xmm12[13],xmm10[13],xmm12[14],xmm10[14],xmm12[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[0,3,2,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7] |
| ; SSE-NEXT: pand %xmm3, %xmm7 |
| ; SSE-NEXT: pandn %xmm1, %xmm3 |
| ; SSE-NEXT: por %xmm7, %xmm3 |
| ; SSE-NEXT: pand %xmm8, %xmm6 |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: pandn %xmm3, %xmm8 |
| ; SSE-NEXT: por %xmm6, %xmm8 |
| ; SSE-NEXT: movdqa %xmm14, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3],xmm14[4],xmm10[4],xmm14[5],xmm10[5],xmm14[6],xmm10[6],xmm14[7],xmm10[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[0,2,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7] |
| ; SSE-NEXT: pand %xmm5, %xmm3 |
| ; SSE-NEXT: pandn %xmm1, %xmm5 |
| ; SSE-NEXT: por %xmm3, %xmm5 |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: pand %xmm4, %xmm8 |
| ; SSE-NEXT: packuswb %xmm5, %xmm1 |
| ; SSE-NEXT: pandn %xmm1, %xmm0 |
| ; SSE-NEXT: por %xmm8, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm10, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255] |
| ; SSE-NEXT: movdqa %xmm14, %xmm2 |
| ; SSE-NEXT: pandn %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: pand %xmm14, %xmm1 |
| ; SSE-NEXT: por %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,0] |
| ; SSE-NEXT: pand %xmm0, %xmm13 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm13, %xmm2 |
| ; SSE-NEXT: pand %xmm15, %xmm2 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm6 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,0,0] |
| ; SSE-NEXT: movdqa %xmm2, %xmm3 |
| ; SSE-NEXT: pandn %xmm6, %xmm3 |
| ; SSE-NEXT: pand %xmm2, %xmm1 |
| ; SSE-NEXT: por %xmm1, %xmm3 |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm6, %xmm1 |
| ; SSE-NEXT: movdqa %xmm14, %xmm6 |
| ; SSE-NEXT: pandn %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm1[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm7, %xmm7 |
| ; SSE-NEXT: pand %xmm14, %xmm7 |
| ; SSE-NEXT: por %xmm6, %xmm7 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm0, %xmm11 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm3, %xmm1 |
| ; SSE-NEXT: pand %xmm15, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm6 |
| ; SSE-NEXT: movdqa %xmm2, %xmm0 |
| ; SSE-NEXT: pandn %xmm6, %xmm0 |
| ; SSE-NEXT: pand %xmm2, %xmm7 |
| ; SSE-NEXT: por %xmm7, %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm6, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm6 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm7, %xmm6 |
| ; SSE-NEXT: movdqa %xmm14, %xmm7 |
| ; SSE-NEXT: pandn %xmm6, %xmm7 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm6 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm8, %xmm8 |
| ; SSE-NEXT: pand %xmm14, %xmm8 |
| ; SSE-NEXT: por %xmm7, %xmm8 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm11, %xmm1 |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE-NEXT: pand %xmm15, %xmm6 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm6, %xmm7 |
| ; SSE-NEXT: movdqa %xmm2, %xmm6 |
| ; SSE-NEXT: pandn %xmm7, %xmm6 |
| ; SSE-NEXT: pand %xmm2, %xmm8 |
| ; SSE-NEXT: por %xmm8, %xmm6 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm7 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,2] |
| ; SSE-NEXT: packuswb %xmm8, %xmm7 |
| ; SSE-NEXT: movdqa %xmm14, %xmm8 |
| ; SSE-NEXT: pandn %xmm7, %xmm8 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm15, %xmm7 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[3,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,0,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm7[2,1,0,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm9, %xmm9 |
| ; SSE-NEXT: pand %xmm14, %xmm9 |
| ; SSE-NEXT: por %xmm8, %xmm9 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm11, %xmm7 |
| ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: por %xmm7, %xmm0 |
| ; SSE-NEXT: pand %xmm0, %xmm15 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm15[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,2,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,1,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7] |
| ; SSE-NEXT: packuswb %xmm7, %xmm8 |
| ; SSE-NEXT: movdqa %xmm2, %xmm7 |
| ; SSE-NEXT: pandn %xmm8, %xmm7 |
| ; SSE-NEXT: pand %xmm2, %xmm9 |
| ; SSE-NEXT: por %xmm9, %xmm7 |
| ; SSE-NEXT: movdqa %xmm10, %xmm8 |
| ; SSE-NEXT: pxor %xmm5, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3],xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7] |
| ; SSE-NEXT: movdqa %xmm10, %xmm9 |
| ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm8[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm8[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm8 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm10[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm9, %xmm8 |
| ; SSE-NEXT: movdqa %xmm14, %xmm10 |
| ; SSE-NEXT: pandn %xmm8, %xmm10 |
| ; SSE-NEXT: movdqa %xmm12, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,0,65535,65535,0,65535,65535,65535] |
| ; SSE-NEXT: movdqa %xmm9, %xmm11 |
| ; SSE-NEXT: pandn %xmm8, %xmm11 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm12[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm8[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm9, %xmm12 |
| ; SSE-NEXT: por %xmm11, %xmm12 |
| ; SSE-NEXT: packuswb %xmm12, %xmm12 |
| ; SSE-NEXT: pand %xmm14, %xmm12 |
| ; SSE-NEXT: por %xmm10, %xmm12 |
| ; SSE-NEXT: movdqa %xmm13, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm13[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,0,65535,0,0] |
| ; SSE-NEXT: movdqa %xmm11, %xmm13 |
| ; SSE-NEXT: pandn %xmm10, %xmm13 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm11, %xmm8 |
| ; SSE-NEXT: por %xmm8, %xmm13 |
| ; SSE-NEXT: packuswb %xmm13, %xmm10 |
| ; SSE-NEXT: movdqa %xmm2, %xmm8 |
| ; SSE-NEXT: pandn %xmm10, %xmm8 |
| ; SSE-NEXT: pand %xmm2, %xmm12 |
| ; SSE-NEXT: por %xmm12, %xmm8 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm13, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7] |
| ; SSE-NEXT: movdqa %xmm13, %xmm12 |
| ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,0],xmm10[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0],xmm10[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm10 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm12 = xmm12[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm13[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm12, %xmm10 |
| ; SSE-NEXT: movdqa %xmm14, %xmm12 |
| ; SSE-NEXT: pandn %xmm10, %xmm12 |
| ; SSE-NEXT: movdqa %xmm4, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa %xmm9, %xmm13 |
| ; SSE-NEXT: pandn %xmm10, %xmm13 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm9, %xmm10 |
| ; SSE-NEXT: por %xmm13, %xmm10 |
| ; SSE-NEXT: packuswb %xmm10, %xmm10 |
| ; SSE-NEXT: pand %xmm14, %xmm10 |
| ; SSE-NEXT: movdqa %xmm14, %xmm4 |
| ; SSE-NEXT: por %xmm12, %xmm10 |
| ; SSE-NEXT: movdqa %xmm3, %xmm12 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: movdqa %xmm11, %xmm14 |
| ; SSE-NEXT: pandn %xmm13, %xmm14 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm11, %xmm12 |
| ; SSE-NEXT: por %xmm12, %xmm14 |
| ; SSE-NEXT: packuswb %xmm14, %xmm13 |
| ; SSE-NEXT: movdqa %xmm2, %xmm12 |
| ; SSE-NEXT: pandn %xmm13, %xmm12 |
| ; SSE-NEXT: pand %xmm2, %xmm10 |
| ; SSE-NEXT: por %xmm10, %xmm12 |
| ; SSE-NEXT: movdqa (%rsp), %xmm14 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm14, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3],xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7] |
| ; SSE-NEXT: movdqa %xmm14, %xmm13 |
| ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm10[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm10[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm10 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm14[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm13, %xmm10 |
| ; SSE-NEXT: movdqa %xmm4, %xmm13 |
| ; SSE-NEXT: pandn %xmm10, %xmm13 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm3, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: movdqa %xmm9, %xmm14 |
| ; SSE-NEXT: pandn %xmm10, %xmm14 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm9, %xmm10 |
| ; SSE-NEXT: por %xmm14, %xmm10 |
| ; SSE-NEXT: packuswb %xmm10, %xmm10 |
| ; SSE-NEXT: pand %xmm4, %xmm10 |
| ; SSE-NEXT: por %xmm13, %xmm10 |
| ; SSE-NEXT: movdqa %xmm1, %xmm13 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: movdqa %xmm11, %xmm15 |
| ; SSE-NEXT: pandn %xmm14, %xmm15 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm11, %xmm13 |
| ; SSE-NEXT: por %xmm13, %xmm15 |
| ; SSE-NEXT: packuswb %xmm15, %xmm14 |
| ; SSE-NEXT: movdqa %xmm2, %xmm13 |
| ; SSE-NEXT: pandn %xmm14, %xmm13 |
| ; SSE-NEXT: pand %xmm2, %xmm10 |
| ; SSE-NEXT: por %xmm10, %xmm13 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm3, %xmm10 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] |
| ; SSE-NEXT: movdqa %xmm3, %xmm14 |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm10[0,0] |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm10[2,3] |
| ; SSE-NEXT: psrlq $48, %xmm10 |
| ; SSE-NEXT: psrldq {{.*#+}} xmm14 = xmm14[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7] |
| ; SSE-NEXT: packuswb %xmm14, %xmm10 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm14 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm5[8],xmm14[9],xmm5[9],xmm14[10],xmm5[10],xmm14[11],xmm5[11],xmm14[12],xmm5[12],xmm14[13],xmm5[13],xmm14[14],xmm5[14],xmm14[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[1,1,2,3] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,5,5] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm15[3,1,1,2,4,5,6,7] |
| ; SSE-NEXT: pand %xmm9, %xmm15 |
| ; SSE-NEXT: pandn %xmm14, %xmm9 |
| ; SSE-NEXT: por %xmm15, %xmm9 |
| ; SSE-NEXT: packuswb %xmm9, %xmm9 |
| ; SSE-NEXT: pand %xmm4, %xmm9 |
| ; SSE-NEXT: pandn %xmm10, %xmm4 |
| ; SSE-NEXT: por %xmm4, %xmm9 |
| ; SSE-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,1,1] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: pand %xmm11, %xmm4 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,7,4] |
| ; SSE-NEXT: pandn %xmm5, %xmm11 |
| ; SSE-NEXT: por %xmm4, %xmm11 |
| ; SSE-NEXT: pand %xmm2, %xmm9 |
| ; SSE-NEXT: packuswb %xmm11, %xmm4 |
| ; SSE-NEXT: pandn %xmm4, %xmm2 |
| ; SSE-NEXT: por %xmm9, %xmm2 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movdqa %xmm7, 16(%r9) |
| ; SSE-NEXT: movdqa %xmm6, (%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movdqa %xmm2, 16(%rax) |
| ; SSE-NEXT: movdqa %xmm13, (%rax) |
| ; SSE-NEXT: movdqa %xmm12, 48(%rax) |
| ; SSE-NEXT: movdqa %xmm8, 32(%rax) |
| ; SSE-NEXT: addq $824, %rsp # imm = 0x338 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride6_vf64: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $808, %rsp # imm = 0x328 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,128,4,10,0,0,0,128,128,128,4,10] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,0,2,8,14,128,128,0,0,0,2,8,14,128,128] |
| ; AVX1-ONLY-NEXT: # xmm5 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [0,0,4,10,0,0,4,10,0,0,4,10,0,0,4,10] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,6,12,0,0,6,12,0,0,6,12,0,0,6,12,0] |
| ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, %xmm8 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm9 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm11 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,255,255,255,255,255,0,0,0,0,0> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 320(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm12 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm13 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm14 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 352(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm4, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, %xmm15 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [0,0,5,11,0,0,5,11,0,0,5,11,0,0,5,11] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,7,13,0,1,7,13,0,1,7,13,0,1,7,13,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm10, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,128,5,11,0,0,0,128,128,128,5,11] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [0,0,0,3,9,15,128,128,0,0,0,3,9,15,128,128] |
| ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm9, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm11, %xmm3 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm12, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm13, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm14, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm15, %xmm3 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [2,8,14,0,2,8,14,0,2,8,14,0,2,8,14,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm11, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm15 = [0,0,6,12,0,0,6,12,0,0,6,12,0,0,6,12] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm12, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm4 = [128,128,128,4,10,0,0,0,128,128,128,4,10,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm4 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [2,8,14,128,128,0,0,0,2,8,14,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm5 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm14, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm10, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm7 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm13 = <0,0,0,0,0,255,255,255,255,255,255,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm13, %xmm3, %xmm4, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm1, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm5, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm8, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm9, %xmm5 |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm13, %xmm3, %xmm4, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm0 = [3,9,15,0,3,9,15,0,3,9,15,0,3,9,15,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm11, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, %xmm11 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,1,7,13,0,1,7,13,0,1,7,13,0,1,7,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm12, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm6[0],xmm5[0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [128,128,128,5,11,0,0,0,128,128,128,5,11,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm0 = [3,9,15,128,128,0,0,0,3,9,15,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm14, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm10, %xmm7 |
| ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm13, %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm1, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm11, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm6[0],xmm5[0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm8, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm9, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, %xmm14 |
| ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm13, %xmm5, %xmm6, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm7 = [2,8,14,0,2,8,14,0,2,8,14,0,2,8,14,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm9, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm8, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm5[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [0,0,0,4,10,128,128,128,0,0,0,4,10,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,0,6,12,0,0,0,128,128,0,6,12] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm11 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm11, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm10, %xmm6 |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,255,255,255,255,255,255,0,0,0,0,0> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm6, %xmm0, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm13, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm7, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm15, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm6, %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm9, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm8, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,5,11,128,128,128,0,0,0,5,11,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm8 = [0,0,0,128,128,1,7,13,0,0,0,128,128,1,7,13] |
| ; AVX1-ONLY-NEXT: # xmm8 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm11, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm10, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm6, %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm13, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm7, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm15, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm5, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm6, %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = [4,10,128,128,128,0,0,0,4,10,128,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, %xmm9 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [128,128,0,6,12,0,0,0,128,128,0,6,12,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm13, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm6 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [4,10,0,0,4,10,0,0,4,10,0,0,4,10,0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm4, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm7 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,2,8,14,0,2,8,14,0,2,8,14,0,2,8,14] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm12, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <255,255,255,255,255,0,0,0,0,0,u,u,u,u,u,u> |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm14, %xmm8 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm14, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm9, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm15, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm10, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm10, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [5,11,128,128,128,0,0,0,5,11,128,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [128,128,1,7,13,0,0,0,128,128,1,7,13,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm13, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm14 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [5,11,0,0,5,11,0,0,5,11,0,0,5,11,0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm10 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm11 = [0,3,9,15,0,3,9,15,0,3,9,15,0,3,9,15] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm12, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm12, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm7, %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, %xmm12 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm8, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm9, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, %xmm7 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm15, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm15, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm0, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpblendvb %xmm12, %xmm1, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [128,128,128,2,8,14,0,0,128,128,128,2,8,14,0,0] |
| ; AVX1-ONLY-NEXT: # xmm11 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm0, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm10 = [0,6,12,128,128,128,0,0,0,6,12,128,128,128,0,0] |
| ; AVX1-ONLY-NEXT: # xmm10 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm12 = [0,0,0,128,128,128,4,10,0,0,0,128,128,128,4,10] |
| ; AVX1-ONLY-NEXT: # xmm12 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm14 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm15 = [0,0,0,2,8,14,128,128,0,0,0,2,8,14,128,128] |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm4[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm9[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm13, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm8, %xmm3 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3,4,5],xmm2[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255] |
| ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm4, %ymm1 |
| ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0] |
| ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm3, %ymm1, %ymm3 |
| ; AVX1-ONLY-NEXT: vorps %ymm3, %ymm2, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 304(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm0, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 288(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm13, %xmm3 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 272(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm3, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm12 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 256(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm3, %xmm15 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm15, %xmm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,4,10,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[u,u,u,u,0,6,12,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm7, %xmm11 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm3, %xmm10 |
| ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm10, %xmm10 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm10[0,1,2],xmm2[3,4,5],xmm10[6,7] |
| ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm4, %ymm0 |
| ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0 |
| ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm1, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm0 = [128,128,128,3,9,15,0,0,128,128,128,3,9,15,0,0] |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm10 = [1,7,13,128,128,128,0,0,1,7,13,128,128,128,0,0] |
| ; AVX1-ONLY-NEXT: # xmm10 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm7, %xmm11 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm11, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[5,11] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm15 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm15, %xmm11 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm15 = xmm9[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm15[0],xmm11[0],xmm15[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm5, %xmm15 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm8, %xmm14 |
| ; AVX1-ONLY-NEXT: vpor %xmm15, %xmm14, %xmm14 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm14[0,1,2],xmm11[3,4,5],xmm14[6,7] |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm4, %ymm2 |
| ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm11, %ymm11 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm11, %ymm1, %ymm11 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm13, %xmm11 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm13, %xmm9 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm11, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[5,11] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero |
| ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm14, %xmm11 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm5[u,u,5,11,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm5[u,u,u,u,1,7,13,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm14[0],xmm11[0],xmm14[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm3, %xmm10 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm10, %xmm0 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm11[3,4,5],xmm0[6,7] |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm4, %ymm2 |
| ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm1, %ymm1 |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm0 = [128,128,128,4,10,0,0,0,128,128,128,4,10,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm14 = [2,8,14,128,128,0,0,0,2,8,14,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm14 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm13, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,0,4,10,128,128,128,0,0,0,4,10,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm5 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, %xmm15 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm7, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,0,6,12,0,0,0,128,128,0,6,12] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm12, %xmm4 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm10, %ymm1 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm2 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm2, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm11, %ymm1 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm1, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm9, %xmm4 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm8, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm14 |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm14, %xmm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm10, %ymm2 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm4, %ymm2 |
| ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm11, %ymm4 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm2, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = [128,128,128,5,11,0,0,0,128,128,128,5,11,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm6, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm0 = [3,9,15,128,128,0,0,0,3,9,15,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm13, %xmm14 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm13, %xmm7 |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm14, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [0,0,0,5,11,128,128,128,0,0,0,5,11,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm15, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm15, %xmm14 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,128,128,1,7,13,0,0,0,128,128,1,7,13] |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm12, %xmm15 |
| ; AVX1-ONLY-NEXT: vpor %xmm14, %xmm15, %xmm14 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm14, %ymm4 |
| ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm10, %ymm4 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm14 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm14, %ymm4 |
| ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm11, %ymm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm14, %ymm11, %ymm14 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm14, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm1[5,11,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm9, %xmm15 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, %xmm13 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpor %xmm14, %xmm15, %xmm14 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm15 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm15, %xmm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm10, %ymm0 |
| ; AVX1-ONLY-NEXT: vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm10, %ymm0 |
| ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm11, %ymm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandnps %ymm10, %ymm11, %ymm10 |
| ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm10, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm9 = [4,10,128,128,128,0,0,0,4,10,128,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm9 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm7, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm2[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm10, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [0,0,128,128,128,2,8,14,0,0,128,128,128,2,8,14] |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm12, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm15 = [0,0,0,6,12,128,128,128,0,0,0,6,12,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm6, %xmm2 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm10, %xmm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm10 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535] |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm2 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm10, %ymm0 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm8, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm6, %xmm4 |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm14[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm11, %ymm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm13, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm1[0,6,12,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm0, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm0, %xmm14 |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm14, %xmm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm10, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm0, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm9, %xmm4 |
| ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm4, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[u,u,u,u,u,u,u,u,4,10,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,2,8,14] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm14[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3,4],xmm1[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm11, %ymm1 |
| ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm2, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = [5,11,128,128,128,0,0,0,5,11,128,128,128,0,0,0] |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,128,128,128,3,9,15,0,0,128,128,128,3,9,15] |
| ; AVX1-ONLY-NEXT: # xmm4 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm12, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm14 = [0,0,1,7,13,128,128,128,0,0,1,7,13,128,128,128] |
| ; AVX1-ONLY-NEXT: # xmm14 = mem[0,0] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm12, %xmm12 |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm12, %xmm7 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2 |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm7 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm10, %ymm2 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm8, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm6, %xmm12 |
| ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm12, %xmm7 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm8 = [5,11,0,0,5,11,0,0,5,11,0,0,5,11,0,0] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm5, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm13[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3,4],xmm7[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm11, %ymm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm11, %ymm7 |
| ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm2, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm3, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm1[1,7,13,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm3 |
| ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3 |
| ; AVX1-ONLY-NEXT: vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm10, %ymm3 |
| ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm3, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm5, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm9, %xmm5 |
| ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm0, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,3,9,15] |
| ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3,4],xmm4[5,6,7] |
| ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm11, %ymm3 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm11, %ymm4 |
| ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm3, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rax) |
| ; AVX1-ONLY-NEXT: addq $808, %rsp # imm = 0x328 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride6_vf64: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $360, %rsp # imm = 0x168 |
| ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0> |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1] |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm6, %ymm12, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm0 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm0, %ymm5, %ymm7, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, %ymm8 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm5, (%rsp) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm1, %xmm11 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm1, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm13 |
| ; AVX2-ONLY-NEXT: vpor %xmm11, %xmm13, %xmm11 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm11, %ymm9, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm15, %ymm9, %ymm14 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm15, %ymm8 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm14, %xmm10 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm14, %xmm15 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm15, %xmm3 |
| ; AVX2-ONLY-NEXT: vpor %xmm3, %xmm10, %xmm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm0[0,1],ymm10[0,1] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm0[2,3],ymm10[2,3] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm6, %ymm11, %ymm10, %ymm6 |
| ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm3, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm2, %ymm4, %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm1, %ymm4, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm14, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm15, %xmm1 |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm15 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm15, %ymm7, %ymm5, %ymm0 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm1, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm0, %xmm3 |
| ; AVX2-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm3 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm14 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12] |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm4 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0> |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm4, %ymm11, %ymm12, %ymm2 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm14, %ymm2, %ymm6 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm3, %ymm6, %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm15, %ymm9, %ymm8, %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, %ymm12 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm3, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm6, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm5 |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm5, %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm4, %ymm10, %ymm9, %ymm4 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm14, %ymm4, %ymm7 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm5, %ymm7, %ymm14 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vpor %xmm1, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm2, %ymm2 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm0, %ymm2, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm6, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm2 |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm2, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm4, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm13, %ymm0, %ymm1, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm7 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm0, %ymm2, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm1 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255> |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm3 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm15, %ymm0, %ymm2, %ymm8 |
| ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm1, %ymm5, %ymm4, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm15, %ymm4, %ymm5, %ymm15 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm4, %ymm5, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm7, %ymm12, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm0 = <0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u> |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm0, %ymm11, %ymm2, %ymm13 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm0, %ymm10, %ymm9, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm4 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm3, %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm2, %xmm0, %xmm10 |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm10, %xmm4 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm4 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm1, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm2, %xmm6, %xmm2 |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm14, %ymm2, %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm3, %xmm3 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm2, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vpor %xmm3, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm2, %xmm6, %xmm1 |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm0 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, %ymm11 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm8, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm12, %xmm2, %xmm3 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm11, %xmm6 |
| ; AVX2-ONLY-NEXT: vpor %xmm3, %xmm6, %xmm3 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm5, %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm8 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm6, %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm5, %xmm14 |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm14, %xmm1 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm14 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm14, %ymm13, %ymm0 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm0, %ymm1, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm15, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm12, %xmm0, %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm15, %xmm9 |
| ; AVX2-ONLY-NEXT: vpor %xmm1, %xmm9, %xmm1 |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm7, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm9, %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm7, %xmm4 |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm8, %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm14, %ymm3, %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm8[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm4, %ymm1, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm11, %xmm12 |
| ; AVX2-ONLY-NEXT: vpor %xmm2, %xmm12, %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm12, %xmm6, %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm14 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm14, %xmm5, %xmm5 |
| ; AVX2-ONLY-NEXT: vpor %xmm6, %xmm5, %xmm5 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm6, %ymm13, %ymm11 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm11[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm5, %ymm2, %ymm2 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm15, %xmm4 |
| ; AVX2-ONLY-NEXT: vpor %xmm0, %xmm4, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm12, %xmm9, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm14, %xmm7, %xmm5 |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm5, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm6, %ymm3, %ymm5 |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm4, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm9, %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm4, %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm9, %xmm8 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, %ymm10 |
| ; AVX2-ONLY-NEXT: vpor %xmm6, %xmm8, %xmm6 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm9, %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm8, %xmm5 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm9, %xmm7 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, %ymm11 |
| ; AVX2-ONLY-NEXT: vpor %xmm5, %xmm7, %xmm5 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7],ymm7[8,9,10],ymm5[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm4, %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128> |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm10, %xmm10 |
| ; AVX2-ONLY-NEXT: vpor %xmm4, %xmm10, %xmm4 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3,4,5,6,7],ymm10[8,9,10],ymm4[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm8, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm11, %xmm8 |
| ; AVX2-ONLY-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm6, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm4, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%rax) |
| ; AVX2-ONLY-NEXT: addq $360, %rsp # imm = 0x168 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i8_stride6_vf64: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: subq $88, %rsp |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm0 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u> |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535] |
| ; AVX512F-NEXT: vmovdqa64 224(%rdi), %ymm29 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %ymm31 |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm10 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm29, %ymm31, %ymm10 |
| ; AVX512F-NEXT: vpshufb %xmm0, %xmm10, %xmm1 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm7 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u> |
| ; AVX512F-NEXT: vextracti128 $1, %ymm10, %xmm11 |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm11, %xmm3 |
| ; AVX512F-NEXT: vpor %xmm1, %xmm3, %xmm1 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %ymm28 |
| ; AVX512F-NEXT: vmovdqa64 32(%rdi), %ymm30 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %ymm27 |
| ; AVX512F-NEXT: vmovdqa64 160(%rdi), %ymm20 |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm12 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm27, %ymm20, %ymm12 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm12, %xmm13 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10> |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm13, %xmm8 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm4, %xmm12, %xmm9 |
| ; AVX512F-NEXT: vpor %xmm8, %xmm9, %xmm8 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX512F-NEXT: vinserti32x4 $2, %xmm1, %zmm8, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm14 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm30, %ymm28, %ymm14 |
| ; AVX512F-NEXT: vpshufb %xmm0, %xmm14, %xmm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm14, %xmm15 |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm15, %xmm1 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm5 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %ymm24 |
| ; AVX512F-NEXT: vmovdqa64 352(%rdi), %ymm23 |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm0 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm0 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm3 |
| ; AVX512F-NEXT: vpshufb %xmm4, %xmm0, %xmm4 |
| ; AVX512F-NEXT: vpor %xmm3, %xmm4, %xmm3 |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm3, %ymm0, %ymm17 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm10, %xmm4 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm10 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm10, %xmm11, %xmm11 |
| ; AVX512F-NEXT: vpor %xmm4, %xmm11, %xmm2 |
| ; AVX512F-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11> |
| ; AVX512F-NEXT: vpshufb %xmm4, %xmm13, %xmm11 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm13, %xmm12, %xmm12 |
| ; AVX512F-NEXT: vpor %xmm11, %xmm12, %xmm2 |
| ; AVX512F-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm14, %xmm3 |
| ; AVX512F-NEXT: vpshufb %xmm10, %xmm15, %xmm10 |
| ; AVX512F-NEXT: vpor %xmm3, %xmm10, %xmm2 |
| ; AVX512F-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-NEXT: vpshufb %xmm4, %xmm1, %xmm1 |
| ; AVX512F-NEXT: vpshufb %xmm13, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vporq %xmm1, %xmm0, %xmm26 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u> |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535] |
| ; AVX512F-NEXT: vmovdqa %ymm13, %ymm10 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm31, %ymm29, %ymm10 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm10, %xmm15 |
| ; AVX512F-NEXT: vpshufb %xmm1, %xmm15, %xmm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm10, %xmm4 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm4, %xmm0 |
| ; AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm0 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm20, %ymm27, %ymm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm0, %xmm11 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12> |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm4, %xmm12 |
| ; AVX512F-NEXT: vpor %xmm11, %xmm12, %xmm2 |
| ; AVX512F-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa %ymm13, %ymm11 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm28, %ymm30, %ymm11 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm11, %xmm14 |
| ; AVX512F-NEXT: vpshufb %xmm1, %xmm14, %xmm1 |
| ; AVX512F-NEXT: vpshufb %xmm3, %xmm11, %xmm3 |
| ; AVX512F-NEXT: vpor %xmm1, %xmm3, %xmm12 |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm3 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3 |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm3, %xmm8 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm9 |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm9, %xmm7 |
| ; AVX512F-NEXT: vpor %xmm7, %xmm8, %xmm1 |
| ; AVX512F-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm1, %xmm15, %xmm8 |
| ; AVX512F-NEXT: vmovdqa64 %xmm1, %xmm19 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm1, %xmm10, %xmm10 |
| ; AVX512F-NEXT: vmovdqa64 %xmm1, %xmm18 |
| ; AVX512F-NEXT: vpor %xmm8, %xmm10, %xmm1 |
| ; AVX512F-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13> |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm4, %xmm4 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm4, %xmm0 |
| ; AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa 256(%rdi), %ymm0 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} ymm21 = ymm0[2,3],mem[2,3] |
| ; AVX512F-NEXT: vinserti32x4 $1, 288(%rdi), %ymm0, %ymm22 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0] |
| ; AVX512F-NEXT: vmovdqa %ymm1, %ymm15 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm15 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm10 = ymm15[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vmovdqa64 %ymm17, %ymm0 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm4 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %ymm17 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} ymm10 = ymm17[2,3],mem[2,3] |
| ; AVX512F-NEXT: vinserti32x4 $1, 96(%rdi), %ymm17, %ymm25 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm17 |
| ; AVX512F-NEXT: vmovdqa %ymm1, %ymm4 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm10, %ymm25, %ymm4 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpternlogq $248, %ymm2, %ymm5, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535] |
| ; AVX512F-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $184, %zmm0, %zmm5, %zmm17 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm26, %ymm0, %ymm15 |
| ; AVX512F-NEXT: vpblendw {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7],ymm0[8,9,10],ymm15[11,12,13,14,15] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload |
| ; AVX512F-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vpternlogq $226, %zmm2, %zmm6, %zmm15 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm26 |
| ; AVX512F-NEXT: vpternlogq $184, %zmm15, %zmm5, %zmm26 |
| ; AVX512F-NEXT: vmovdqa64 %xmm19, %xmm0 |
| ; AVX512F-NEXT: vpshufb %xmm0, %xmm14, %xmm0 |
| ; AVX512F-NEXT: vmovdqa64 %xmm18, %xmm2 |
| ; AVX512F-NEXT: vpshufb %xmm2, %xmm11, %xmm2 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm2, %xmm14 |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm3, %xmm0 |
| ; AVX512F-NEXT: vpshufb %xmm7, %xmm9, %xmm2 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm2, %xmm0 |
| ; AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm8 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u> |
| ; AVX512F-NEXT: vmovdqa64 %ymm29, %ymm4 |
| ; AVX512F-NEXT: vpternlogq $226, %ymm31, %ymm16, %ymm4 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm11 |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm11, %xmm0 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[4,10],zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vporq %xmm0, %xmm2, %xmm29 |
| ; AVX512F-NEXT: vmovdqa64 %ymm20, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $226, %ymm27, %ymm13, %ymm2 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14> |
| ; AVX512F-NEXT: vpshufb %xmm5, %xmm0, %xmm3 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm6, %xmm2, %xmm7 |
| ; AVX512F-NEXT: vporq %xmm3, %xmm7, %xmm20 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm28, %ymm30, %ymm16 |
| ; AVX512F-NEXT: vmovdqa %ymm1, %ymm3 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm25, %ymm10, %ymm3 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm13 |
| ; AVX512F-NEXT: vextracti128 $1, %ymm13, %xmm7 |
| ; AVX512F-NEXT: vpshufb %xmm5, %xmm7, %xmm5 |
| ; AVX512F-NEXT: vpshufb %xmm6, %xmm13, %xmm6 |
| ; AVX512F-NEXT: vporq %xmm5, %xmm6, %xmm19 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm5 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm5, %xmm11, %xmm6 |
| ; AVX512F-NEXT: vmovdqa64 %xmm5, %xmm23 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm11 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u> |
| ; AVX512F-NEXT: vpshufb %xmm11, %xmm4, %xmm9 |
| ; AVX512F-NEXT: vporq %xmm6, %xmm9, %xmm27 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15> |
| ; AVX512F-NEXT: vpshufb %xmm9, %xmm0, %xmm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128> |
| ; AVX512F-NEXT: vpshufb %xmm15, %xmm2, %xmm2 |
| ; AVX512F-NEXT: vpor %xmm0, %xmm2, %xmm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128] |
| ; AVX512F-NEXT: vpshufb %ymm2, %ymm3, %ymm4 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpternlogq $236, %ymm18, %ymm4, %ymm12 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128] |
| ; AVX512F-NEXT: vpshufb %ymm4, %ymm3, %ymm3 |
| ; AVX512F-NEXT: vpternlogq $236, %ymm18, %ymm3, %ymm14 |
| ; AVX512F-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm1 |
| ; AVX512F-NEXT: vpshufb %ymm2, %ymm1, %ymm2 |
| ; AVX512F-NEXT: vpternlogq $248, %ymm18, %ymm3, %ymm2 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpshufb %ymm4, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vmovdqa64 %ymm16, %ymm6 |
| ; AVX512F-NEXT: vextracti32x4 $1, %ymm16, %xmm3 |
| ; AVX512F-NEXT: vpshufb %xmm8, %xmm3, %xmm4 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} xmm8 = xmm6[4,10],zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpor %xmm4, %xmm8, %xmm4 |
| ; AVX512F-NEXT: vpshufb %xmm9, %xmm7, %xmm7 |
| ; AVX512F-NEXT: vpshufb %xmm15, %xmm13, %xmm8 |
| ; AVX512F-NEXT: vpor %xmm7, %xmm8, %xmm7 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535] |
| ; AVX512F-NEXT: vpternlogq $226, %ymm25, %ymm8, %ymm10 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vpternlogq $248, %ymm18, %ymm9, %ymm1 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm19, %ymm0, %ymm5 |
| ; AVX512F-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm8 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm9 = ymm8[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $242, %ymm5, %ymm13, %ymm9 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5 |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm20, %ymm0, %ymm9 |
| ; AVX512F-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpternlogq $226, %zmm9, %zmm15, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $184, %zmm4, %zmm9, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %xmm23, %xmm4 |
| ; AVX512F-NEXT: vpshufb %xmm4, %xmm3, %xmm3 |
| ; AVX512F-NEXT: vpshufb %xmm11, %xmm6, %xmm4 |
| ; AVX512F-NEXT: vpor %xmm3, %xmm4, %xmm3 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm4 = ymm10[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512F-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7] |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm4 |
| ; AVX512F-NEXT: vpshufb {{.*#+}} ymm7 = ymm8[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; AVX512F-NEXT: vpternlogq $242, %ymm4, %ymm13, %ymm7 |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX512F-NEXT: vinserti32x4 $2, %xmm27, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpternlogq $226, %zmm0, %zmm15, %zmm3 |
| ; AVX512F-NEXT: vpternlogq $184, %zmm3, %zmm9, %zmm4 |
| ; AVX512F-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] |
| ; AVX512F-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm12 |
| ; AVX512F-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload |
| ; AVX512F-NEXT: vpternlogq $184, %zmm0, %zmm3, %zmm14 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] |
| ; AVX512F-NEXT: vpternlogq $184, %zmm12, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpternlogq $184, %zmm14, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm17, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm26, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, (%r9) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, (%rax) |
| ; AVX512F-NEXT: addq $88, %rsp |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i8_stride6_vf64: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u> |
| ; AVX512BW-NEXT: vmovdqa 224(%rdi), %ymm0 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %ymm23 |
| ; AVX512BW-NEXT: movw $18724, %r10w # imm = 0x4924 |
| ; AVX512BW-NEXT: kmovd %r10d, %k1 |
| ; AVX512BW-NEXT: vpblendmw %ymm0, %ymm23, %ymm9 {%k1} |
| ; AVX512BW-NEXT: vpshufb %xmm2, %xmm9, %xmm1 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u> |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm9, %xmm12 |
| ; AVX512BW-NEXT: vpshufb %xmm4, %xmm12, %xmm3 |
| ; AVX512BW-NEXT: vpor %xmm1, %xmm3, %xmm5 |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm10 |
| ; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm3 |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %ymm6 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %ymm26 |
| ; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm1 |
| ; AVX512BW-NEXT: vpblendmw %ymm26, %ymm1, %ymm15 {%k1} |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm15, %xmm16 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm16, %xmm11 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm18, %xmm15, %xmm13 |
| ; AVX512BW-NEXT: vpor %xmm11, %xmm13, %xmm11 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm5, %zmm11, %zmm11 |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3] |
| ; AVX512BW-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm13 |
| ; AVX512BW-NEXT: movw $-28124, %r10w # imm = 0x9224 |
| ; AVX512BW-NEXT: kmovd %r10d, %k4 |
| ; AVX512BW-NEXT: vpblendmw %ymm5, %ymm13, %ymm19 {%k4} |
| ; AVX512BW-NEXT: vpblendmw %ymm3, %ymm10, %ymm20 {%k1} |
| ; AVX512BW-NEXT: vpshufb %xmm2, %xmm20, %xmm2 |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21 |
| ; AVX512BW-NEXT: vpshufb %xmm4, %xmm21, %xmm4 |
| ; AVX512BW-NEXT: vpor %xmm2, %xmm4, %xmm2 |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10] |
| ; AVX512BW-NEXT: movl $4192256, %r10d # imm = 0x3FF800 |
| ; AVX512BW-NEXT: kmovd %r10d, %k2 |
| ; AVX512BW-NEXT: vpshufb %ymm6, %ymm19, %ymm2 {%k2} |
| ; AVX512BW-NEXT: vmovdqu16 %zmm11, %zmm2 {%k2} |
| ; AVX512BW-NEXT: vmovdqa 256(%rdi), %ymm11 |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3] |
| ; AVX512BW-NEXT: vinserti128 $1, 288(%rdi), %ymm11, %ymm14 |
| ; AVX512BW-NEXT: vpblendmw %ymm4, %ymm14, %ymm22 {%k4} |
| ; AVX512BW-NEXT: vpshufb %ymm6, %ymm22, %ymm7 |
| ; AVX512BW-NEXT: vmovdqa 320(%rdi), %ymm11 |
| ; AVX512BW-NEXT: vmovdqa 352(%rdi), %ymm6 |
| ; AVX512BW-NEXT: vpblendmw %ymm11, %ymm6, %ymm24 {%k1} |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm24, %xmm25 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm25, %xmm17 |
| ; AVX512BW-NEXT: vpshufb %xmm18, %xmm24, %xmm18 |
| ; AVX512BW-NEXT: vporq %xmm17, %xmm18, %xmm17 |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm17, %ymm0, %ymm8 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7 |
| ; AVX512BW-NEXT: movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000 |
| ; AVX512BW-NEXT: kmovq %rdi, %k3 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm2 {%k3} |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm7, %xmm9, %xmm8 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm9 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm9, %xmm12, %xmm12 |
| ; AVX512BW-NEXT: vpor %xmm8, %xmm12, %xmm8 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11> |
| ; AVX512BW-NEXT: vpshufb %xmm12, %xmm16, %xmm16 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm15, %xmm15 |
| ; AVX512BW-NEXT: vporq %xmm16, %xmm15, %xmm15 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm15, %ymm0, %ymm15 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm8, %zmm15, %zmm8 |
| ; AVX512BW-NEXT: vpshufb %xmm7, %xmm20, %xmm7 |
| ; AVX512BW-NEXT: vpshufb %xmm9, %xmm21, %xmm9 |
| ; AVX512BW-NEXT: vpor %xmm7, %xmm9, %xmm9 |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11] |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm19, %ymm9 {%k2} |
| ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm9 {%k2} |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm22, %ymm7 |
| ; AVX512BW-NEXT: vpshufb %xmm12, %xmm25, %xmm8 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm24, %xmm12 |
| ; AVX512BW-NEXT: vpor %xmm8, %xmm12, %xmm8 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k3} |
| ; AVX512BW-NEXT: vpblendmw %ymm13, %ymm5, %ymm15 {%k4} |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u> |
| ; AVX512BW-NEXT: movw $9362, %di # imm = 0x2492 |
| ; AVX512BW-NEXT: kmovd %edi, %k2 |
| ; AVX512BW-NEXT: vpblendmw %ymm10, %ymm3, %ymm8 {%k2} |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm8, %xmm16 |
| ; AVX512BW-NEXT: vpshufb %xmm7, %xmm16, %xmm12 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm8, %xmm18 |
| ; AVX512BW-NEXT: vporq %xmm12, %xmm18, %xmm18 |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm19 = [2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12] |
| ; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800 |
| ; AVX512BW-NEXT: kmovd %edi, %k5 |
| ; AVX512BW-NEXT: vpshufb %ymm19, %ymm15, %ymm18 {%k5} |
| ; AVX512BW-NEXT: vpblendmw %ymm23, %ymm0, %ymm20 {%k2} |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm20, %xmm21 |
| ; AVX512BW-NEXT: vpshufb %xmm7, %xmm21, %xmm7 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm20, %xmm12 |
| ; AVX512BW-NEXT: vpor %xmm7, %xmm12, %xmm7 |
| ; AVX512BW-NEXT: vpblendmw %ymm1, %ymm26, %ymm17 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm22, %xmm17, %xmm12 |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm17, %xmm24 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12> |
| ; AVX512BW-NEXT: vpshufb %xmm25, %xmm24, %xmm27 |
| ; AVX512BW-NEXT: vporq %xmm12, %xmm27, %xmm12 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm7, %zmm12, %zmm12 |
| ; AVX512BW-NEXT: movl $2097151, %edi # imm = 0x1FFFFF |
| ; AVX512BW-NEXT: kmovq %rdi, %k6 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm18, %zmm12 {%k6} |
| ; AVX512BW-NEXT: vpblendmw %ymm14, %ymm4, %ymm7 {%k4} |
| ; AVX512BW-NEXT: vpblendmw %ymm6, %ymm11, %ymm18 {%k1} |
| ; AVX512BW-NEXT: vpshufb %xmm22, %xmm18, %xmm22 |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm18, %xmm27 |
| ; AVX512BW-NEXT: vpshufb %xmm25, %xmm27, %xmm25 |
| ; AVX512BW-NEXT: vporq %xmm22, %xmm25, %xmm22 |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm22, %ymm0, %ymm22 |
| ; AVX512BW-NEXT: vpshufb %ymm19, %ymm7, %ymm22 {%k5} |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm22, %zmm0, %zmm19 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm19, %zmm12 {%k3} |
| ; AVX512BW-NEXT: movw $9289, %di # imm = 0x2449 |
| ; AVX512BW-NEXT: kmovd %edi, %k4 |
| ; AVX512BW-NEXT: vmovdqu16 %ymm14, %ymm4 {%k4} |
| ; AVX512BW-NEXT: vmovdqu16 %ymm13, %ymm5 {%k4} |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm13 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm13, %xmm16, %xmm14 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm16, %xmm8, %xmm8 |
| ; AVX512BW-NEXT: vpor %xmm14, %xmm8, %xmm8 |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13] |
| ; AVX512BW-NEXT: vpshufb %ymm14, %ymm15, %ymm8 {%k5} |
| ; AVX512BW-NEXT: vpshufb %xmm13, %xmm21, %xmm13 |
| ; AVX512BW-NEXT: vpshufb %xmm16, %xmm20, %xmm15 |
| ; AVX512BW-NEXT: vpor %xmm13, %xmm15, %xmm13 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm15, %xmm17, %xmm16 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm24, %xmm19 |
| ; AVX512BW-NEXT: vporq %xmm16, %xmm19, %xmm16 |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm16, %ymm0, %ymm16 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm13, %zmm16, %zmm13 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm8, %zmm13 {%k6} |
| ; AVX512BW-NEXT: vpshufb %xmm15, %xmm18, %xmm8 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm27, %xmm15 |
| ; AVX512BW-NEXT: vpor %xmm8, %xmm15, %xmm8 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX512BW-NEXT: vpshufb %ymm14, %ymm7, %ymm8 {%k5} |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm7, %zmm13 {%k3} |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14,4,10,0,6,12,2,8,14] |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm5, %ymm8 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vmovdqu16 %ymm10, %ymm3 {%k1} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm15 |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm15, %xmm10 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm16 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm16, %xmm3, %xmm17 |
| ; AVX512BW-NEXT: vporq %xmm10, %xmm17, %xmm10 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu16 %ymm23, %ymm0 {%k1} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm8 |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm8, %xmm14 |
| ; AVX512BW-NEXT: vpshufb %xmm16, %xmm0, %xmm16 |
| ; AVX512BW-NEXT: vporq %xmm14, %xmm16, %xmm14 |
| ; AVX512BW-NEXT: vmovdqu16 %ymm26, %ymm1 {%k2} |
| ; AVX512BW-NEXT: vextracti32x4 $1, %ymm1, %xmm16 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm16, %xmm18 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm19, %xmm1, %xmm20 |
| ; AVX512BW-NEXT: vporq %xmm18, %xmm20, %xmm18 |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm18, %ymm0, %ymm18 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm14, %zmm18, %zmm14 |
| ; AVX512BW-NEXT: movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000 |
| ; AVX512BW-NEXT: kmovq %rdi, %k1 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm14, %zmm10 {%k1} |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm4, %ymm7 |
| ; AVX512BW-NEXT: vmovdqu16 %ymm11, %ymm6 {%k2} |
| ; AVX512BW-NEXT: vextracti128 $1, %ymm6, %xmm11 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm11, %xmm14 |
| ; AVX512BW-NEXT: vpshufb %xmm19, %xmm6, %xmm17 |
| ; AVX512BW-NEXT: vporq %xmm14, %xmm17, %xmm14 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14 |
| ; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000 |
| ; AVX512BW-NEXT: kmovd %edi, %k2 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm14, %ymm7 {%k2} |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm10 {%k2} |
| ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15,5,11,1,7,13,3,9,15] |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm5, %ymm5 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm15, %xmm15 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} xmm17 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm3, %xmm3 |
| ; AVX512BW-NEXT: vpor %xmm3, %xmm15, %xmm3 |
| ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7] |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm8, %xmm5 |
| ; AVX512BW-NEXT: vpshufb %xmm17, %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vpor %xmm5, %xmm0, %xmm0 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15> |
| ; AVX512BW-NEXT: vpshufb %xmm5, %xmm16, %xmm8 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128> |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm1, %xmm1 |
| ; AVX512BW-NEXT: vpor %xmm1, %xmm8, %xmm1 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vpshufb %ymm7, %ymm4, %ymm0 |
| ; AVX512BW-NEXT: vpshufb %xmm5, %xmm11, %xmm1 |
| ; AVX512BW-NEXT: vpshufb %xmm14, %xmm6, %xmm4 |
| ; AVX512BW-NEXT: vpor %xmm1, %xmm4, %xmm1 |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX512BW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k2} |
| ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm3 {%k2} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <384 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186, i32 192, i32 198, i32 204, i32 210, i32 216, i32 222, i32 228, i32 234, i32 240, i32 246, i32 252, i32 258, i32 264, i32 270, i32 276, i32 282, i32 288, i32 294, i32 300, i32 306, i32 312, i32 318, i32 324, i32 330, i32 336, i32 342, i32 348, i32 354, i32 360, i32 366, i32 372, i32 378> |
| %strided.vec1 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187, i32 193, i32 199, i32 205, i32 211, i32 217, i32 223, i32 229, i32 235, i32 241, i32 247, i32 253, i32 259, i32 265, i32 271, i32 277, i32 283, i32 289, i32 295, i32 301, i32 307, i32 313, i32 319, i32 325, i32 331, i32 337, i32 343, i32 349, i32 355, i32 361, i32 367, i32 373, i32 379> |
| %strided.vec2 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188, i32 194, i32 200, i32 206, i32 212, i32 218, i32 224, i32 230, i32 236, i32 242, i32 248, i32 254, i32 260, i32 266, i32 272, i32 278, i32 284, i32 290, i32 296, i32 302, i32 308, i32 314, i32 320, i32 326, i32 332, i32 338, i32 344, i32 350, i32 356, i32 362, i32 368, i32 374, i32 380> |
| %strided.vec3 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189, i32 195, i32 201, i32 207, i32 213, i32 219, i32 225, i32 231, i32 237, i32 243, i32 249, i32 255, i32 261, i32 267, i32 273, i32 279, i32 285, i32 291, i32 297, i32 303, i32 309, i32 315, i32 321, i32 327, i32 333, i32 339, i32 345, i32 351, i32 357, i32 363, i32 369, i32 375, i32 381> |
| %strided.vec4 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190, i32 196, i32 202, i32 208, i32 214, i32 220, i32 226, i32 232, i32 238, i32 244, i32 250, i32 256, i32 262, i32 268, i32 274, i32 280, i32 286, i32 292, i32 298, i32 304, i32 310, i32 316, i32 322, i32 328, i32 334, i32 340, i32 346, i32 352, i32 358, i32 364, i32 370, i32 376, i32 382> |
| %strided.vec5 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191, i32 197, i32 203, i32 209, i32 215, i32 221, i32 227, i32 233, i32 239, i32 245, i32 251, i32 257, i32 263, i32 269, i32 275, i32 281, i32 287, i32 293, i32 299, i32 305, i32 311, i32 317, i32 323, i32 329, i32 335, i32 341, i32 347, i32 353, i32 359, i32 365, i32 371, i32 377, i32 383> |
| store <64 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <64 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <64 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <64 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| store <64 x i8> %strided.vec4, ptr %out.vec4, align 64 |
| store <64 x i8> %strided.vec5, ptr %out.vec5, align 64 |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX1: {{.*}} |
| ; AVX2-FAST: {{.*}} |
| ; AVX2-FAST-PERLANE: {{.*}} |
| ; AVX2-SLOW: {{.*}} |
| ; AVX512: {{.*}} |
| ; AVX512BW-FAST: {{.*}} |
| ; AVX512BW-ONLY-FAST: {{.*}} |
| ; AVX512BW-ONLY-SLOW: {{.*}} |
| ; AVX512BW-SLOW: {{.*}} |
| ; AVX512DQ-FAST: {{.*}} |
| ; AVX512DQ-SLOW: {{.*}} |
| ; AVX512DQBW-FAST: {{.*}} |
| ; AVX512DQBW-SLOW: {{.*}} |
| ; AVX512F-FAST: {{.*}} |
| ; AVX512F-ONLY-FAST: {{.*}} |
| ; AVX512F-ONLY-SLOW: {{.*}} |
| ; AVX512F-SLOW: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |