| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12 |
| |
| ; These patterns are produced by the LoopVectorizer for interleaved loads. |
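| ; For example, a scalar loop of the following shape (a hypothetical C source, |
| ; shown only to illustrate the pattern being tested): |
| ; |
| ;   for (int i = 0; i != n; ++i) { |
| ;     out0[i] = in[4 * i + 0]; |
| ;     out1[i] = in[4 * i + 1]; |
| ;     out2[i] = in[4 * i + 2]; |
| ;     out3[i] = in[4 * i + 3]; |
| ;   } |
| ; |
| ; The vectorizer rewrites the interleave group as one wide load of 4 * VF |
| ; bytes followed by one shufflevector per strided result, which is the shape |
| ; of the IR at the end of each function below. |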
| |
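| ; vf2: an <8 x i8> load split into four <2 x i8> strided results. |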
| define void @load_i8_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: pand %xmm0, %xmm1 |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: pxor %xmm2, %xmm2 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 |
| ; SSE-NEXT: packuswb %xmm3, %xmm3 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: movd %xmm1, %eax |
| ; SSE-NEXT: movw %ax, (%rsi) |
| ; SSE-NEXT: movd %xmm2, %eax |
| ; SSE-NEXT: movw %ax, (%rdx) |
| ; SSE-NEXT: movd %xmm3, %eax |
| ; SSE-NEXT: movw %ax, (%rcx) |
| ; SSE-NEXT: movd %xmm0, %eax |
| ; SSE-NEXT: movw %ax, (%r8) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-LABEL: load_i8_stride4_vf2: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpextrw $0, %xmm1, (%rsi) |
| ; AVX1-NEXT: vpextrw $0, %xmm2, (%rdx) |
| ; AVX1-NEXT: vpextrw $0, %xmm3, (%rcx) |
| ; AVX1-NEXT: vpextrw $0, %xmm0, (%r8) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i8_stride4_vf2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX512-NEXT: vpmovdb %xmm0, %xmm1 |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpextrw $0, %xmm1, (%rsi) |
| ; AVX512-NEXT: vpextrw $0, %xmm2, (%rdx) |
| ; AVX512-NEXT: vpextrw $0, %xmm3, (%rcx) |
| ; AVX512-NEXT: vpextrw $0, %xmm0, (%r8) |
| ; AVX512-NEXT: retq |
| %wide.vec = load <8 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 0, i32 4> |
| %strided.vec1 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 1, i32 5> |
| %strided.vec2 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 2, i32 6> |
| %strided.vec3 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 3, i32 7> |
| store <2 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <2 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <2 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <2 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| |
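| ; vf4: a <16 x i8> load split into four <4 x i8> strided results. |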
| define void @load_i8_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: pand %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: pxor %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] |
| ; SSE-NEXT: packuswb %xmm2, %xmm2 |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] |
| ; SSE-NEXT: packuswb %xmm1, %xmm1 |
| ; SSE-NEXT: movd %xmm0, (%rsi) |
| ; SSE-NEXT: movd %xmm2, (%rdx) |
| ; SSE-NEXT: movd %xmm4, (%rcx) |
| ; SSE-NEXT: movd %xmm1, (%r8) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-LABEL: load_i8_stride4_vf4: |
| ; AVX1: # %bb.0: |
| ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX1-NEXT: vmovd %xmm1, (%rsi) |
| ; AVX1-NEXT: vmovd %xmm2, (%rdx) |
| ; AVX1-NEXT: vmovd %xmm3, (%rcx) |
| ; AVX1-NEXT: vmovd %xmm0, (%r8) |
| ; AVX1-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i8_stride4_vf4: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u] |
| ; AVX512-NEXT: vpmovdb %xmm0, (%rsi) |
| ; AVX512-NEXT: vmovd %xmm1, (%rdx) |
| ; AVX512-NEXT: vmovd %xmm2, (%rcx) |
| ; AVX512-NEXT: vmovd %xmm3, (%r8) |
| ; AVX512-NEXT: retq |
| %wide.vec = load <16 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12> |
| %strided.vec1 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13> |
| %strided.vec2 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14> |
| %strided.vec3 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15> |
| store <4 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <4 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <4 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <4 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| |
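| ; vf8: a <32 x i8> load split into four <8 x i8> strided results. |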
| define void @load_i8_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf8: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm4 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: movdqa %xmm4, %xmm2 |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pand %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm2, %xmm0 |
| ; SSE-NEXT: packuswb %xmm0, %xmm0 |
| ; SSE-NEXT: pxor %xmm7, %xmm7 |
| ; SSE-NEXT: movdqa %xmm4, %xmm2 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm4, %xmm3 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1] |
| ; SSE-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm6[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm7 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm8, %xmm4 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm8, %xmm1 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm1 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm3, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,3,2,3] |
| ; SSE-NEXT: movq %xmm0, (%rsi) |
| ; SSE-NEXT: movq %xmm7, (%rdx) |
| ; SSE-NEXT: movq %xmm1, (%rcx) |
| ; SSE-NEXT: movq %xmm2, (%r8) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride4_vf8: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm2, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm0 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm1, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm4 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm4 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovq %xmm0, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovq %xmm3, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovq %xmm4, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovq %xmm1, (%r8) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride4_vf8: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm2, %xmm3 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm0 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm1, %xmm3 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm4 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm5 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovq %xmm0, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovq %xmm3, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovq %xmm4, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovq %xmm1, (%r8) |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i8_stride4_vf8: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 |
| ; AVX512-NEXT: vpsrld $8, %ymm0, %ymm1 |
| ; AVX512-NEXT: vpsrld $16, %ymm0, %ymm2 |
| ; AVX512-NEXT: vpsrld $24, %ymm0, %ymm3 |
| ; AVX512-NEXT: vpmovdb %ymm0, (%rsi) |
| ; AVX512-NEXT: vpmovdb %ymm1, (%rdx) |
| ; AVX512-NEXT: vpmovdb %ymm2, (%rcx) |
| ; AVX512-NEXT: vpmovdb %ymm3, (%r8) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %wide.vec = load <32 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28> |
| %strided.vec1 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29> |
| %strided.vec2 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30> |
| %strided.vec3 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31> |
| store <8 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <8 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <8 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <8 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| |
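| ; vf16: a <64 x i8> load split into four <16 x i8> strided results. |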
| define void @load_i8_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf16: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movdqa (%rdi), %xmm1 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm6 |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm11 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: movdqa %xmm11, %xmm3 |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: movdqa %xmm6, %xmm4 |
| ; SSE-NEXT: pand %xmm0, %xmm4 |
| ; SSE-NEXT: packuswb %xmm3, %xmm4 |
| ; SSE-NEXT: movdqa %xmm2, %xmm3 |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pand %xmm1, %xmm0 |
| ; SSE-NEXT: packuswb %xmm3, %xmm0 |
| ; SSE-NEXT: packuswb %xmm4, %xmm0 |
| ; SSE-NEXT: pxor %xmm9, %xmm9 |
| ; SSE-NEXT: movdqa %xmm11, %xmm3 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm11, %xmm4 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm7[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1] |
| ; SSE-NEXT: movdqa %xmm6, %xmm5 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm7[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm6, %xmm7 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm7[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm12[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm14 |
| ; SSE-NEXT: movdqa %xmm2, %xmm8 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm10[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm2, %xmm10 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm10[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm13[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm12[0],xmm15[1],xmm12[1] |
| ; SSE-NEXT: movdqa %xmm1, %xmm13 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm1, %xmm12 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3],xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm12[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm13[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1] |
| ; SSE-NEXT: packuswb %xmm15, %xmm9 |
| ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm14[0,3] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm1, %xmm11 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm1, %xmm6 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm11, %xmm6 |
| ; SSE-NEXT: pand %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm1, %xmm11 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm6[0,3] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm3, %xmm4 |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm3, %xmm5 |
| ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm4[0,3] |
| ; SSE-NEXT: movdqa %xmm0, (%rsi) |
| ; SSE-NEXT: movaps %xmm9, (%rdx) |
| ; SSE-NEXT: movaps %xmm1, (%rcx) |
| ; SSE-NEXT: movaps %xmm5, (%r8) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride4_vf16: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm4, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm5 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm1, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm0, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm4, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm4, %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm7 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm1, %xmm8 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm0, %xmm7 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm4 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm0, %xmm0 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm6, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%r8) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride4_vf16: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm4 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm3, %xmm5 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm5 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm1, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm0, %xmm5 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm2, %xmm5 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm6 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm6 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm2, %xmm6 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm7 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm1, %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm0, %xmm7 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm3 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm3 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm0 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm4, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm6, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%r8) |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i8_stride4_vf16: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512-NEXT: vpsrld $8, %zmm0, %zmm1 |
| ; AVX512-NEXT: vpsrld $16, %zmm0, %zmm2 |
| ; AVX512-NEXT: vpsrld $24, %zmm0, %zmm3 |
| ; AVX512-NEXT: vpmovdb %zmm0, (%rsi) |
| ; AVX512-NEXT: vpmovdb %zmm1, (%rdx) |
| ; AVX512-NEXT: vpmovdb %zmm2, (%rcx) |
| ; AVX512-NEXT: vpmovdb %zmm3, (%r8) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %wide.vec = load <64 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60> |
| %strided.vec1 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61> |
| %strided.vec2 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62> |
| %strided.vec3 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63> |
| store <16 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <16 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <16 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <16 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| |
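| ; vf32: a <128 x i8> load split into four <32 x i8> strided results. |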
| define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf32: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $120, %rsp |
| ; SSE-NEXT: movdqa 64(%rdi), %xmm4 |
| ; SSE-NEXT: movdqa 80(%rdi), %xmm13 |
| ; SSE-NEXT: movdqa 96(%rdi), %xmm15 |
| ; SSE-NEXT: movdqa 112(%rdi), %xmm9 |
| ; SSE-NEXT: movdqa (%rdi), %xmm10 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm14 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm8 |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm10, %xmm2 |
| ; SSE-NEXT: pand %xmm6, %xmm2 |
| ; SSE-NEXT: packuswb %xmm0, %xmm2 |
| ; SSE-NEXT: packuswb %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm15, %xmm1 |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: pand %xmm4, %xmm6 |
| ; SSE-NEXT: movdqa %xmm4, %xmm2 |
| ; SSE-NEXT: packuswb %xmm0, %xmm6 |
| ; SSE-NEXT: packuswb %xmm1, %xmm6 |
| ; SSE-NEXT: pxor %xmm4, %xmm4 |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm3, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm7 |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm14, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm10, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm11, %xmm5 |
| ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm7[0,3] |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1] |
| ; SSE-NEXT: packuswb %xmm7, %xmm0 |
| ; SSE-NEXT: movdqa %xmm13, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm13, %xmm12 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm12[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] |
| ; SSE-NEXT: movdqa %xmm2, %xmm7 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm2, %xmm11 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm1, %xmm4 |
| ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm8 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm1, %xmm2 |
| ; SSE-NEXT: pand %xmm0, %xmm14 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm10 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm3, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm2[0,3] |
| ; SSE-NEXT: pand %xmm0, %xmm9 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm15 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm15[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm8 |
| ; SSE-NEXT: pand %xmm0, %xmm13 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm3 |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm8[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm2, %xmm8 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, (%rsp), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1] |
| ; SSE-NEXT: packuswb %xmm2, %xmm0 |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm8[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm2[0],xmm9[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm9 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm12[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm7 |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm9[0,3] |
| ; SSE-NEXT: movdqa %xmm6, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, (%rsi) |
| ; SSE-NEXT: movaps %xmm4, 16(%rdx) |
| ; SSE-NEXT: movaps %xmm5, (%rdx) |
| ; SSE-NEXT: movaps %xmm3, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm1, (%rcx) |
| ; SSE-NEXT: movaps %xmm7, 16(%r8) |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: addq $120, %rsp |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride4_vf32: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm8 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm0, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm2, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm9 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm3, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm5, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm7, %xmm11 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm6, %xmm8 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm4, %xmm11 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm1, %xmm9 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm0, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm2, %xmm11 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm11 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm3, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm5, %xmm13 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm7, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm6, %xmm9 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm4, %xmm12 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm1, %xmm11 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm0, %xmm11 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm2, %xmm12 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm12 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm3, %xmm13 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm5, %xmm14 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3],xmm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm7, %xmm13 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm6, %xmm10 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm4, %xmm13 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm1, %xmm12 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm11 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm0, %xmm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm7, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm6, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm4, %xmm4 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm9, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r8) |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride4_vf32: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm6 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm5, %xmm7 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm6, %xmm4, %xmm6 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm7, %xmm2, %xmm7 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm8 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm8, %ymm1, %ymm9 |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4] |
| ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm6, %ymm9 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm8, %ymm0, %ymm8 |
| ; AVX2-ONLY-NEXT: vpermd %ymm8, %ymm6, %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm5, %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm8, %xmm4, %xmm8 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm3, %xmm10 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm2, %xmm9 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm9 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm9, %ymm1, %ymm10 |
| ; AVX2-ONLY-NEXT: vpermd %ymm10, %ymm6, %ymm10 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm9, %ymm0, %ymm9 |
| ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm6, %ymm9 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm5, %xmm10 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm9, %xmm4, %xmm9 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm3, %xmm11 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm2, %xmm10 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm10 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm1, %ymm11 |
| ; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm6, %ymm11 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vpermd %ymm10, %ymm6, %ymm10 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm5, %xmm5 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm4, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm3 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm5, %xmm2, %xmm2 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm3 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm3, %ymm1, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm3, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermd %ymm0, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%r8) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i8_stride4_vf32: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm0 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX512F-NEXT: vmovdqa 96(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpshufb %ymm0, %ymm1, %ymm2 |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %ymm3 |
| ; AVX512F-NEXT: vpshufb %ymm0, %ymm3, %ymm0 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12] |
| ; AVX512F-NEXT: vpermt2d %ymm2, %ymm4, %ymm0 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512F-NEXT: vpmovdb %zmm2, %xmm5 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm5 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX512F-NEXT: vpshufb %ymm5, %ymm1, %ymm6 |
| ; AVX512F-NEXT: vpshufb %ymm5, %ymm3, %ymm5 |
| ; AVX512F-NEXT: vpermt2d %ymm6, %ymm4, %ymm5 |
| ; AVX512F-NEXT: vpsrld $8, %zmm2, %zmm6 |
| ; AVX512F-NEXT: vpmovdb %zmm6, %xmm6 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm6 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX512F-NEXT: vpshufb %ymm6, %ymm1, %ymm7 |
| ; AVX512F-NEXT: vpshufb %ymm6, %ymm3, %ymm6 |
| ; AVX512F-NEXT: vpermt2d %ymm7, %ymm4, %ymm6 |
| ; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm7 |
| ; AVX512F-NEXT: vpmovdb %zmm7, %xmm7 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm7 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm3, %ymm3 |
| ; AVX512F-NEXT: vpermt2d %ymm1, %ymm4, %ymm3 |
| ; AVX512F-NEXT: vpsrld $24, %zmm2, %zmm1 |
| ; AVX512F-NEXT: vpmovdb %zmm1, %xmm1 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi) |
| ; AVX512F-NEXT: vmovdqa %ymm5, (%rdx) |
| ; AVX512F-NEXT: vmovdqa %ymm6, (%rcx) |
| ; AVX512F-NEXT: vmovdqa %ymm1, (%r8) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i8_stride4_vf32: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm0 = [0,4,8,12,1,5,9,13] |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zmm2[0,4,8,12,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[16,20,24,28,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[32,36,40,44,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[48,52,56,60,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm1[0,4,8,12],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,16,20,24,28],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,32,36,40,44],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,48,52,56,60],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vporq %zmm3, %zmm4, %zmm3 |
| ; AVX512BW-NEXT: vpermd %zmm3, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zero,zero,zero,zero,zmm2[1,5,9,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[17,21,25,29,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[33,37,41,45,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[49,53,57,61,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm1[1,5,9,13],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,17,21,25,29],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,33,37,41,45],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,49,53,57,61],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vporq %zmm4, %zmm5, %zmm4 |
| ; AVX512BW-NEXT: vpermd %zmm4, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zmm2[2,6,10,14,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[18,22,26,30,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[34,38,42,46,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[50,54,58,62,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm1[2,6,10,14],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,18,22,26,30],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,34,38,42,46],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,50,54,58,62],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vporq %zmm5, %zmm6, %zmm5 |
| ; AVX512BW-NEXT: vpermd %zmm5, %zmm0, %zmm5 |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zmm2[3,7,11,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[19,23,27,31,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[35,39,43,47,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[51,55,59,63,u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[3,7,11,15],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,19,23,27,31],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,35,39,43,47],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,51,55,59,63],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u] |
| ; AVX512BW-NEXT: vporq %zmm2, %zmm1, %zmm1 |
| ; AVX512BW-NEXT: vpermd %zmm1, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa %ymm3, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa %ymm4, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa %ymm5, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa %ymm0, (%r8) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <128 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124> |
| %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125> |
| %strided.vec2 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126> |
| %strided.vec3 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127> |
| store <32 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <32 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <32 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <32 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| |
| define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i8_stride4_vf64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $632, %rsp # imm = 0x278 |
| ; SSE-NEXT: movdqa 16(%rdi), %xmm15 |
| ; SSE-NEXT: movdqa 32(%rdi), %xmm13 |
| ; SSE-NEXT: movdqa 48(%rdi), %xmm7 |
| ; SSE-NEXT: movdqa 128(%rdi), %xmm14 |
| ; SSE-NEXT: movdqa 144(%rdi), %xmm10 |
| ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 160(%rdi), %xmm4 |
| ; SSE-NEXT: movdqa 176(%rdi), %xmm3 |
| ; SSE-NEXT: movdqa 64(%rdi), %xmm8 |
| ; SSE-NEXT: movdqa 80(%rdi), %xmm11 |
| ; SSE-NEXT: movdqa 96(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa 112(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,0,255,0,255,0] |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm2, %xmm1 |
| ; SSE-NEXT: movdqa %xmm2, %xmm9 |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm8, %xmm2 |
| ; SSE-NEXT: pand %xmm6, %xmm2 |
| ; SSE-NEXT: packuswb %xmm0, %xmm2 |
| ; SSE-NEXT: packuswb %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm4, %xmm1 |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm10, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm14, %xmm2 |
| ; SSE-NEXT: pand %xmm6, %xmm2 |
| ; SSE-NEXT: packuswb %xmm0, %xmm2 |
| ; SSE-NEXT: packuswb %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa %xmm13, %xmm1 |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa %xmm15, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa (%rdi), %xmm10 |
| ; SSE-NEXT: movdqa %xmm10, %xmm2 |
| ; SSE-NEXT: pand %xmm6, %xmm2 |
| ; SSE-NEXT: packuswb %xmm0, %xmm2 |
| ; SSE-NEXT: packuswb %xmm1, %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa 240(%rdi), %xmm12 |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa 224(%rdi), %xmm1 |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm1 |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: movdqa 208(%rdi), %xmm0 |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm6, %xmm0 |
| ; SSE-NEXT: movdqa 192(%rdi), %xmm2 |
| ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pand %xmm2, %xmm6 |
| ; SSE-NEXT: packuswb %xmm0, %xmm6 |
| ; SSE-NEXT: packuswb %xmm1, %xmm6 |
| ; SSE-NEXT: pxor %xmm2, %xmm2 |
| ; SSE-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm5, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm9, %xmm1 |
| ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm9 |
| ; SSE-NEXT: movdqa %xmm11, %xmm1 |
| ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm8, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm9[0,3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm3, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm9 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm14, %xmm1 |
| ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm14, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm9[0,3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm7, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm7, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm13, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1] |
| ; SSE-NEXT: packuswb %xmm5, %xmm0 |
| ; SSE-NEXT: movdqa %xmm15, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm15, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] |
| ; SSE-NEXT: movdqa %xmm10, %xmm3 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm10, %xmm4 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm5[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1] |
| ; SSE-NEXT: packuswb %xmm1, %xmm14 |
| ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[0,3] |
| ; SSE-NEXT: movdqa %xmm12, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm12, %xmm1 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm9, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm9, %xmm3 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm1, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: movdqa %xmm11, %xmm0 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; SSE-NEXT: movdqa %xmm8, %xmm1 |
| ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] |
| ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movdqa %xmm8, %xmm5 |
| ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: packuswb %xmm0, %xmm1 |
| ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] |
| ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255] |
| ; SSE-NEXT: pand %xmm0, %xmm7 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm13 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm3 |
| ; SSE-NEXT: pand %xmm0, %xmm15 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm10 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm10[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm4[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm10 |
| ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm3[0,3] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm2, %xmm3 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm2 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm2 |
| ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm3[0,3] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm4 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm3, %xmm4 |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: pand %xmm0, %xmm3 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm7, %xmm3 |
| ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm4[0,3] |
| ; SSE-NEXT: pand %xmm0, %xmm12 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pand %xmm0, %xmm9 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm4, %xmm7 |
| ; SSE-NEXT: pand %xmm0, %xmm11 |
| ; SSE-NEXT: pand %xmm0, %xmm8 |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] |
| ; SSE-NEXT: packuswb %xmm0, %xmm4 |
| ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1] |
| ; SSE-NEXT: packuswb %xmm7, %xmm8 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] |
| ; SSE-NEXT: pshufd $231, (%rsp), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm0 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1] |
| ; SSE-NEXT: packuswb %xmm7, %xmm0 |
| ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm8[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm9 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1] |
| ; SSE-NEXT: packuswb %xmm8, %xmm7 |
| ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm9[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1] |
| ; SSE-NEXT: packuswb %xmm9, %xmm11 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1] |
| ; SSE-NEXT: packuswb %xmm9, %xmm8 |
| ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,3],xmm11[0,3] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm12 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1] |
| ; SSE-NEXT: packuswb %xmm11, %xmm12 |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[0,1,3,1,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] |
| ; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = mem[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm5[3,1,2,3] |
| ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[3,1,2,3,4,5,6,7] |
| ; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1] |
| ; SSE-NEXT: packuswb %xmm11, %xmm13 |
| ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm12[0,3] |
| ; SSE-NEXT: movdqa %xmm6, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 16(%rsi) |
| ; SSE-NEXT: movaps %xmm1, 48(%rdx) |
| ; SSE-NEXT: movaps %xmm14, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rdx) |
| ; SSE-NEXT: movaps %xmm4, 48(%rcx) |
| ; SSE-NEXT: movaps %xmm3, 32(%rcx) |
| ; SSE-NEXT: movaps %xmm2, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm10, (%rcx) |
| ; SSE-NEXT: movaps %xmm13, 48(%r8) |
| ; SSE-NEXT: movaps %xmm8, 32(%r8) |
| ; SSE-NEXT: movaps %xmm7, 16(%r8) |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: addq $632, %rsp # imm = 0x278 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i8_stride4_vf64: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $296, %rsp # imm = 0x128 |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm1 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm3, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm8 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm7 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm5, %xmm10 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm4, %xmm11 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm11 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm12 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3],xmm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm0, %xmm11 |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm0, %xmm12 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm14 |
| ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm15 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm0, %xmm15 |
| ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm0, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1] |
| ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm4, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm0 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm8, %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm7, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm12, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm11, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm10, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm9, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm13, %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm14, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm15, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm5, %xmm0 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm4, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm2 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm7, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm12, %xmm3 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm11, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm10, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm9, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm13, %xmm1 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm14, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm15, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm4, %xmm5 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm9, %xmm0 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm10, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm11, %xmm2 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm1 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm3, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vbroadcastss {{.*#+}} xmm3 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm7, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm6, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm12, %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm6, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm6, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm7, %xmm7 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm13, %xmm5 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm14, %xmm6 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm15, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm4, %xmm7 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm8, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm9, %xmm1 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm10, %xmm6 |
| ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm11, %xmm3 |
| ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r8) |
| ; AVX1-ONLY-NEXT: addq $296, %rsp # imm = 0x128 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i8_stride4_vf64: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $216, %rsp |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm3 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm7, %xmm2 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm6, %xmm8 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm8[0],xmm2[0],xmm8[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm10 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm5, %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm4, %xmm9 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm8[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm13 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm13, %ymm1, %ymm9 |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,4,0,4,0,4,0,4] |
| ; AVX2-ONLY-NEXT: vpermd %ymm9, %ymm2, %ymm9 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm13, %ymm0, %ymm11 |
| ; AVX2-ONLY-NEXT: vpermd %ymm11, %ymm2, %ymm11 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5],ymm9[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 176(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm8, %xmm11 |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm9, %xmm3 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1] |
| ; AVX2-ONLY-NEXT: vmovdqa 144(%rdi), %xmm11 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm11, %xmm14 |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm10, %xmm12, %xmm10 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm14 = xmm10[0],xmm14[0],xmm10[1],xmm14[1] |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm13, %ymm0, %ymm14 |
| ; AVX2-ONLY-NEXT: vpermd %ymm14, %ymm2, %ymm15 |
| ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %ymm13, %ymm0, %ymm13 |
| ; AVX2-ONLY-NEXT: vpermd %ymm13, %ymm2, %ymm13 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm7, %xmm13 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm7, %xmm10 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm6, %xmm15 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm6, %xmm7 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm15 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm15, %xmm5, %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm5, %xmm14 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm15, %xmm5, %xmm1 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm4, %ymm13 |
| ; AVX2-ONLY-NEXT: vpermd %ymm13, %ymm2, %ymm13 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm6, %ymm4 |
| ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm2, %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm13[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm8, %xmm0 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm9, %xmm3 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm15, %xmm11, %xmm3 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm15, %xmm12, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm12, %ymm3 |
| ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm2, %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm1, %ymm4, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm2, %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm10, %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm7, %xmm3 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm3 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm14, %xmm4 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm13 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm13[0],xmm4[0],xmm13[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm4 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm4, %ymm7, %ymm13 |
| ; AVX2-ONLY-NEXT: vpermd %ymm13, %ymm2, %ymm13 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm4, %ymm6, %ymm15 |
| ; AVX2-ONLY-NEXT: vpermd %ymm15, %ymm2, %ymm15 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm1 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm9, %xmm0 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm11, %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm3, %xmm14, %xmm3 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm12, %ymm10 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm4, %ymm12, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermd %ymm1, %ymm2, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm4, %ymm12, %ymm3 |
| ; AVX2-ONLY-NEXT: vpermd %ymm3, %ymm2, %ymm3 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm0 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm4, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm5, %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm6, %xmm6 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpbroadcastd {{.*#+}} ymm5 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm7, %ymm6 |
| ; AVX2-ONLY-NEXT: vpermd %ymm6, %ymm2, %ymm6 |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm7, %ymm7 |
| ; AVX2-ONLY-NEXT: vpermd %ymm7, %ymm2, %ymm7 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm0, %xmm9, %xmm0 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm11, %xmm6 |
| ; AVX2-ONLY-NEXT: vpshufb %xmm4, %xmm14, %xmm4 |
| ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm10, %ymm4 |
| ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm12, %ymm5 |
| ; AVX2-ONLY-NEXT: vpermd %ymm4, %ymm2, %ymm4 |
| ; AVX2-ONLY-NEXT: vpermd %ymm5, %ymm2, %ymm2 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm13, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%r8) |
| ; AVX2-ONLY-NEXT: addq $216, %rsp |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i8_stride4_vf64: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm2 |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm7 = [0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12] |
| ; AVX512F-NEXT: vmovdqa 224(%rdi), %ymm3 |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm3, %ymm5 |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %ymm4 |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm4, %ymm6 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12] |
| ; AVX512F-NEXT: vpermt2d %ymm5, %ymm1, %ymm6 |
| ; AVX512F-NEXT: vpmovdb %zmm2, %xmm5 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm8 |
| ; AVX512F-NEXT: vmovdqa 96(%rdi), %ymm5 |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm5, %ymm9 |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %ymm6 |
| ; AVX512F-NEXT: vpshufb %ymm7, %ymm6, %ymm7 |
| ; AVX512F-NEXT: vpermt2d %ymm9, %ymm1, %ymm7 |
| ; AVX512F-NEXT: vpmovdb %zmm0, %xmm9 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm8 = [1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13] |
| ; AVX512F-NEXT: vpshufb %ymm8, %ymm3, %ymm9 |
| ; AVX512F-NEXT: vpshufb %ymm8, %ymm4, %ymm10 |
| ; AVX512F-NEXT: vpermt2d %ymm9, %ymm1, %ymm10 |
| ; AVX512F-NEXT: vpsrld $8, %zmm2, %zmm9 |
| ; AVX512F-NEXT: vpmovdb %zmm9, %xmm9 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm9 |
| ; AVX512F-NEXT: vpshufb %ymm8, %ymm5, %ymm10 |
| ; AVX512F-NEXT: vpshufb %ymm8, %ymm6, %ymm8 |
| ; AVX512F-NEXT: vpermt2d %ymm10, %ymm1, %ymm8 |
| ; AVX512F-NEXT: vpsrld $8, %zmm0, %zmm10 |
| ; AVX512F-NEXT: vpmovdb %zmm10, %xmm10 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm9 = [2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14] |
| ; AVX512F-NEXT: vpshufb %ymm9, %ymm3, %ymm10 |
| ; AVX512F-NEXT: vpshufb %ymm9, %ymm4, %ymm11 |
| ; AVX512F-NEXT: vpermt2d %ymm10, %ymm1, %ymm11 |
| ; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm10 |
| ; AVX512F-NEXT: vpmovdb %zmm10, %xmm10 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm10 |
| ; AVX512F-NEXT: vpshufb %ymm9, %ymm5, %ymm11 |
| ; AVX512F-NEXT: vpshufb %ymm9, %ymm6, %ymm9 |
| ; AVX512F-NEXT: vpermt2d %ymm11, %ymm1, %ymm9 |
| ; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm11 |
| ; AVX512F-NEXT: vpmovdb %zmm11, %xmm11 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[4,5,6,7] |
| ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm10 = [3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15] |
| ; AVX512F-NEXT: vpshufb %ymm10, %ymm3, %ymm3 |
| ; AVX512F-NEXT: vpshufb %ymm10, %ymm4, %ymm4 |
| ; AVX512F-NEXT: vpermt2d %ymm3, %ymm1, %ymm4 |
| ; AVX512F-NEXT: vpsrld $24, %zmm2, %zmm2 |
| ; AVX512F-NEXT: vpmovdb %zmm2, %xmm2 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpshufb %ymm10, %ymm5, %ymm3 |
| ; AVX512F-NEXT: vpshufb %ymm10, %ymm6, %ymm4 |
| ; AVX512F-NEXT: vpermt2d %ymm3, %ymm1, %ymm4 |
| ; AVX512F-NEXT: vpsrld $24, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, (%r8) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i8_stride4_vf64: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = <128,128,128,128,0,4,8,12,u,u,u,u,u,u,u,u,128,128,128,128,16,20,24,28,u,u,u,u,u,u,u,u,128,128,128,128,32,36,40,44,u,u,u,u,u,u,u,u,128,128,128,128,48,52,56,60,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm4, %zmm3, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,4,8,12,128,128,128,128,u,u,u,u,u,u,u,u,16,20,24,28,128,128,128,128,u,u,u,u,u,u,u,u,32,36,40,44,128,128,128,128,u,u,u,u,u,u,u,u,48,52,56,60,128,128,128,128,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm6, %zmm2, %zmm7 |
| ; AVX512BW-NEXT: vporq %zmm5, %zmm7, %zmm5 |
| ; AVX512BW-NEXT: vpshufb %zmm4, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vpshufb %zmm6, %zmm0, %zmm6 |
| ; AVX512BW-NEXT: vporq %zmm4, %zmm6, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,4,8,12,1,5,9,13,16,20,24,28,17,21,25,29] |
| ; AVX512BW-NEXT: vpermt2d %zmm5, %zmm6, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = <128,128,128,128,1,5,9,13,u,u,u,u,u,u,u,u,128,128,128,128,17,21,25,29,u,u,u,u,u,u,u,u,128,128,128,128,33,37,41,45,u,u,u,u,u,u,u,u,128,128,128,128,49,53,57,61,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm5, %zmm3, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <1,5,9,13,128,128,128,128,u,u,u,u,u,u,u,u,17,21,25,29,128,128,128,128,u,u,u,u,u,u,u,u,33,37,41,45,128,128,128,128,u,u,u,u,u,u,u,u,49,53,57,61,128,128,128,128,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm8, %zmm2, %zmm9 |
| ; AVX512BW-NEXT: vporq %zmm7, %zmm9, %zmm7 |
| ; AVX512BW-NEXT: vpshufb %zmm5, %zmm1, %zmm5 |
| ; AVX512BW-NEXT: vpshufb %zmm8, %zmm0, %zmm8 |
| ; AVX512BW-NEXT: vporq %zmm5, %zmm8, %zmm5 |
| ; AVX512BW-NEXT: vpermt2d %zmm7, %zmm6, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = <128,128,128,128,2,6,10,14,u,u,u,u,u,u,u,u,128,128,128,128,18,22,26,30,u,u,u,u,u,u,u,u,128,128,128,128,34,38,42,46,u,u,u,u,u,u,u,u,128,128,128,128,50,54,58,62,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm7, %zmm3, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <2,6,10,14,128,128,128,128,u,u,u,u,u,u,u,u,18,22,26,30,128,128,128,128,u,u,u,u,u,u,u,u,34,38,42,46,128,128,128,128,u,u,u,u,u,u,u,u,50,54,58,62,128,128,128,128,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm9, %zmm2, %zmm10 |
| ; AVX512BW-NEXT: vporq %zmm8, %zmm10, %zmm8 |
| ; AVX512BW-NEXT: vpshufb %zmm7, %zmm1, %zmm7 |
| ; AVX512BW-NEXT: vpshufb %zmm9, %zmm0, %zmm9 |
| ; AVX512BW-NEXT: vporq %zmm7, %zmm9, %zmm7 |
| ; AVX512BW-NEXT: vpermt2d %zmm8, %zmm6, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = <128,128,128,128,3,7,11,15,u,u,u,u,u,u,u,u,128,128,128,128,19,23,27,31,u,u,u,u,u,u,u,u,128,128,128,128,35,39,43,47,u,u,u,u,u,u,u,u,128,128,128,128,51,55,59,63,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm8, %zmm3, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm9 = <3,7,11,15,128,128,128,128,u,u,u,u,u,u,u,u,19,23,27,31,128,128,128,128,u,u,u,u,u,u,u,u,35,39,43,47,128,128,128,128,u,u,u,u,u,u,u,u,51,55,59,63,128,128,128,128,u,u,u,u,u,u,u,u> |
| ; AVX512BW-NEXT: vpshufb %zmm9, %zmm2, %zmm2 |
| ; AVX512BW-NEXT: vporq %zmm3, %zmm2, %zmm2 |
| ; AVX512BW-NEXT: vpshufb %zmm8, %zmm1, %zmm1 |
| ; AVX512BW-NEXT: vpshufb %zmm9, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vpermt2d %zmm2, %zmm6, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%r8) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <256 x i8>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124, i32 128, i32 132, i32 136, i32 140, i32 144, i32 148, i32 152, i32 156, i32 160, i32 164, i32 168, i32 172, i32 176, i32 180, i32 184, i32 188, i32 192, i32 196, i32 200, i32 204, i32 208, i32 212, i32 216, i32 220, i32 224, i32 228, i32 232, i32 236, i32 240, i32 244, i32 248, i32 252> |
| %strided.vec1 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125, i32 129, i32 133, i32 137, i32 141, i32 145, i32 149, i32 153, i32 157, i32 161, i32 165, i32 169, i32 173, i32 177, i32 181, i32 185, i32 189, i32 193, i32 197, i32 201, i32 205, i32 209, i32 213, i32 217, i32 221, i32 225, i32 229, i32 233, i32 237, i32 241, i32 245, i32 249, i32 253> |
| %strided.vec2 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126, i32 130, i32 134, i32 138, i32 142, i32 146, i32 150, i32 154, i32 158, i32 162, i32 166, i32 170, i32 174, i32 178, i32 182, i32 186, i32 190, i32 194, i32 198, i32 202, i32 206, i32 210, i32 214, i32 218, i32 222, i32 226, i32 230, i32 234, i32 238, i32 242, i32 246, i32 250, i32 254> |
| %strided.vec3 = shufflevector <256 x i8> %wide.vec, <256 x i8> poison, <64 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127, i32 131, i32 135, i32 139, i32 143, i32 147, i32 151, i32 155, i32 159, i32 163, i32 167, i32 171, i32 175, i32 179, i32 183, i32 187, i32 191, i32 195, i32 199, i32 203, i32 207, i32 211, i32 215, i32 219, i32 223, i32 227, i32 231, i32 235, i32 239, i32 243, i32 247, i32 251, i32 255> |
| store <64 x i8> %strided.vec0, ptr %out.vec0, align 64 |
| store <64 x i8> %strided.vec1, ptr %out.vec1, align 64 |
| store <64 x i8> %strided.vec2, ptr %out.vec2, align 64 |
| store <64 x i8> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
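| ; Summary of the vf64 case above: the IR loads one <256 x i8> vector and |
| ; deinterleaves it at stride 4 into four <64 x i8> results (byte indices |
| ; 0,4,8,..., 1,5,9,..., 2,6,10,... and 3,7,11,...), storing one result per |
| ; output pointer. The checked lowerings follow three strategies visible in |
| ; the assertions: AVX1/AVX2 pick bytes per 128-bit lane with vpshufb and |
| ; merge across lanes with vpermd/vpblendd, AVX512F isolates each byte |
| ; position with vpsrld + vpmovdb, and AVX512BW selects bytes with masked |
| ; vpshufb + vporq before a single cross-lane vpermt2d. |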
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX: {{.*}} |
| ; AVX2: {{.*}} |
| ; AVX2-FAST: {{.*}} |
| ; AVX2-FAST-PERLANE: {{.*}} |
| ; AVX2-SLOW: {{.*}} |
| ; AVX512BW-FAST: {{.*}} |
| ; AVX512BW-ONLY-FAST: {{.*}} |
| ; AVX512BW-ONLY-SLOW: {{.*}} |
| ; AVX512BW-SLOW: {{.*}} |
| ; AVX512DQ-FAST: {{.*}} |
| ; AVX512DQ-SLOW: {{.*}} |
| ; AVX512DQBW-FAST: {{.*}} |
| ; AVX512DQBW-SLOW: {{.*}} |
| ; AVX512F-FAST: {{.*}} |
| ; AVX512F-ONLY-FAST: {{.*}} |
| ; AVX512F-ONLY-SLOW: {{.*}} |
| ; AVX512F-SLOW: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |