| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s |
| |
; These patterns are produced by the LoopVectorizer for stride-4 interleaved
; loads of i16 elements.
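;
; A scalar loop of the following shape (hypothetical C source, shown only to
; illustrate where the IR below comes from) is what the LoopVectorizer turns
; into the wide load followed by four stride-4 shufflevectors tested here:
;
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[4*i + 0];
;     out1[i] = in[4*i + 1];
;     out2[i] = in[4*i + 2];
;     out3[i] = in[4*i + 3];
;   }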
| |
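; vf2: a single 128-bit load (<8 x i16>) is deinterleaved into four <2 x i16>
; results; e.g. %strided.vec0 selects elements 0 and 4 of %wide.vec. Each
; result is produced with vpshufd/vpshuflw shuffles and stored with vmovd.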
| define void @vf2(<8 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1, <2 x i16>* %out.vec2, <2 x i16>* %out.vec3) nounwind { |
| ; AVX2-LABEL: vf2: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] |
| ; AVX2-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,2,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3] |
| ; AVX2-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,0,2,3,4,5,6,7] |
| ; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; AVX2-NEXT: vmovd %xmm2, (%rsi) |
| ; AVX2-NEXT: vmovd %xmm1, (%rdx) |
| ; AVX2-NEXT: vmovd %xmm3, (%rcx) |
| ; AVX2-NEXT: vmovd %xmm0, (%r8) |
| ; AVX2-NEXT: retq |
| %wide.vec = load <8 x i16>, <8 x i16>* %in.vec, align 32 |
| |
| %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 0, i32 4> |
| %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 1, i32 5> |
| %strided.vec2 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 2, i32 6> |
| %strided.vec3 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 3, i32 7> |
| |
| store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32 |
| store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32 |
| store <2 x i16> %strided.vec2, <2 x i16>* %out.vec2, align 32 |
| store <2 x i16> %strided.vec3, <2 x i16>* %out.vec3, align 32 |
| |
| ret void |
| } |
| |
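; vf4: <16 x i16> deinterleaved into four <4 x i16> results. Lane 0 is
; extracted by blending each source half with zero (vpblendw keeps words 0
; and 4 of each xmm, zeroing the rest) and then narrowing with vpackusdw,
; which is lossless here because every dword holds a zero-extended i16. The
; FAST variant replaces the vpshufd+vpshuflw pair for lane 1 with a single
; vpshufb using a constant-pool mask.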
| define void @vf4(<16 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3) nounwind { |
| ; AVX2-SLOW-LABEL: vf4: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-SLOW-NEXT: vmovq %xmm0, (%rsi) |
| ; AVX2-SLOW-NEXT: vmovq %xmm3, (%rdx) |
| ; AVX2-SLOW-NEXT: vmovq %xmm4, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovq %xmm1, (%r8) |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vf4: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm2 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7] |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7] |
| ; AVX2-FAST-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 |
| ; AVX2-FAST-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15] |
| ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm2, %xmm4 |
| ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm1, %xmm3 |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-FAST-NEXT: vmovq %xmm0, (%rsi) |
| ; AVX2-FAST-NEXT: vmovq %xmm3, (%rdx) |
| ; AVX2-FAST-NEXT: vmovq %xmm4, (%rcx) |
| ; AVX2-FAST-NEXT: vmovq %xmm1, (%r8) |
| ; AVX2-FAST-NEXT: retq |
| %wide.vec = load <16 x i16>, <16 x i16>* %in.vec, align 32 |
| |
| %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12> |
| %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13> |
| %strided.vec2 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14> |
| %strided.vec3 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15> |
| |
| store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32 |
| store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32 |
| store <4 x i16> %strided.vec2, <4 x i16>* %out.vec2, align 32 |
| store <4 x i16> %strided.vec3, <4 x i16>* %out.vec3, align 32 |
| |
| ret void |
| } |
| |
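; vf8: <32 x i16> (four 128-bit loads) deinterleaved into four <8 x i16>
; results. The per-xmm extracts follow the vf4 pattern; pairs of 64-bit
; partial results are then combined with vpunpckldq and vpblendd.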
| define void @vf8(<32 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1, <8 x i16>* %out.vec2, <8 x i16>* %out.vec3) nounwind { |
| ; AVX2-SLOW-LABEL: vf8: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX2-SLOW-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX2-SLOW-NEXT: vpxor %xmm4, %xmm4, %xmm4 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7] |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm5, %xmm6, %xmm5 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7] |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm6, %xmm4, %xmm4 |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm5, %xmm4, %xmm8 |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm3[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm0[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] |
| ; AVX2-SLOW-NEXT: vmovdqa %xmm8, (%rsi) |
| ; AVX2-SLOW-NEXT: vmovdqa %xmm5, (%rdx) |
| ; AVX2-SLOW-NEXT: vmovdqa %xmm4, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%r8) |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vf8: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX2-FAST-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX2-FAST-NEXT: vpxor %xmm4, %xmm4, %xmm4 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7] |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7] |
| ; AVX2-FAST-NEXT: vpackusdw %xmm5, %xmm6, %xmm5 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7] |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7] |
| ; AVX2-FAST-NEXT: vpackusdw %xmm6, %xmm4, %xmm4 |
| ; AVX2-FAST-NEXT: vpackusdw %xmm5, %xmm4, %xmm8 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15] |
| ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm3, %xmm6 |
| ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm5 |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15] |
| ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm1, %xmm7 |
| ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm0, %xmm6 |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm6 = xmm3[0,1,2,0,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[0,1,2,0,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm0[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] |
| ; AVX2-FAST-NEXT: vmovdqa %xmm8, (%rsi) |
| ; AVX2-FAST-NEXT: vmovdqa %xmm5, (%rdx) |
| ; AVX2-FAST-NEXT: vmovdqa %xmm4, (%rcx) |
| ; AVX2-FAST-NEXT: vmovdqa %xmm0, (%r8) |
| ; AVX2-FAST-NEXT: retq |
| %wide.vec = load <32 x i16>, <32 x i16>* %in.vec, align 32 |
| |
| %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28> |
| %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29> |
| %strided.vec2 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30> |
| %strided.vec3 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31> |
| |
| store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32 |
| store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32 |
| store <8 x i16> %strided.vec2, <8 x i16>* %out.vec2, align 32 |
| store <8 x i16> %strided.vec3, <8 x i16>* %out.vec3, align 32 |
| |
| ret void |
| } |
| |
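; vf16: <64 x i16> deinterleaved into four <16 x i16> results. The SLOW
; variant runs out of registers and spills one 32-byte intermediate to the
; stack; the FAST variant instead loads the upper half as ymm registers and
; uses cross-lane vpermd plus vpshufb to gather each stride-4 group.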
| define void @vf16(<64 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2, <16 x i16>* %out.vec3) nounwind { |
| ; AVX2-SLOW-LABEL: vf16: |
| ; AVX2-SLOW: # %bb.0: |
| ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm10 |
| ; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm11 |
| ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm12 |
| ; AVX2-SLOW-NEXT: vmovdqa 48(%rdi), %xmm13 |
| ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm13[0],xmm0[1,2,3],xmm13[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm12[0],xmm0[1,2,3],xmm12[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm2, %xmm5, %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm11[0],xmm0[1,2,3],xmm11[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0],xmm0[1,2,3],xmm10[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm5, %xmm6, %xmm5 |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm2, %xmm5, %xmm8 |
| ; AVX2-SLOW-NEXT: vmovdqa 112(%rdi), %xmm6 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0],xmm0[1,2,3],xmm6[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %xmm7 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0],xmm0[1,2,3],xmm7[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm2, %xmm5, %xmm2 |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm2, %xmm2, %xmm2 |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm9 |
| ; AVX2-SLOW-NEXT: vmovdqa 80(%rdi), %xmm2 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %xmm5 |
| ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3],xmm5[4],xmm0[5,6,7] |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm9[6,7] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm6[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm13[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm12[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm11[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[0,2,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm6[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm8[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm4[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm13[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm7[0,1,2,0,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm11[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm6[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[3,1,2,3] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,0,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm8[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm13[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm7[0,1,3,1,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7] |
| ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3] |
| ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm9, (%rdx) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx) |
| ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%r8) |
| ; AVX2-SLOW-NEXT: vzeroupper |
| ; AVX2-SLOW-NEXT: retq |
| ; |
| ; AVX2-FAST-LABEL: vf16: |
| ; AVX2-FAST: # %bb.0: |
| ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm8 |
| ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm13 |
| ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm14 |
| ; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm3 |
| ; AVX2-FAST-NEXT: vmovdqa 48(%rdi), %xmm5 |
| ; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0],xmm2[1,2,3],xmm5[4],xmm2[5,6,7] |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3],xmm3[4],xmm2[5,6,7] |
| ; AVX2-FAST-NEXT: vpackusdw %xmm7, %xmm4, %xmm4 |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7] |
| ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0],xmm2[1,2,3],xmm14[4],xmm2[5,6,7] |
| ; AVX2-FAST-NEXT: vpackusdw %xmm7, %xmm2, %xmm2 |
| ; AVX2-FAST-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,2,3,0,2,4,6] |
| ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm4, %ymm7 |
| ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm10 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29] |
| ; AVX2-FAST-NEXT: vpermd %ymm8, %ymm4, %ymm4 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = <0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u> |
| ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm4, %ymm12 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm2[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15] |
| ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm5, %xmm6 |
| ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm2 |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15] |
| ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm1, %xmm0 |
| ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm14, %xmm6 |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31> |
| ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm7, %ymm6 |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = <2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u> |
| ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm4, %ymm4 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7] |
| ; AVX2-FAST-NEXT: vpermd %ymm13, %ymm0, %ymm4 |
| ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29] |
| ; AVX2-FAST-NEXT: vpermd %ymm8, %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm0, %ymm8 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[0,1,2,0,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm3[0,1,2,0,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm11 = xmm7[0],xmm2[0],xmm7[1],xmm2[1] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm14[3,1,2,3] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[2,0,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm11[2,3] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm4, %ymm4 |
| ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm0, %ymm0 |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[0,1,3,1,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7] |
| ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3] |
| ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-FAST-NEXT: vmovdqa %ymm10, (%rsi) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm9, (%rdx) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm2, (%rcx) |
| ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%r8) |
| ; AVX2-FAST-NEXT: vzeroupper |
| ; AVX2-FAST-NEXT: retq |
| %wide.vec = load <64 x i16>, <64 x i16>* %in.vec, align 32 |
| |
| %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60> |
| %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61> |
| %strided.vec2 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62> |
| %strided.vec3 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63> |
| |
| store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32 |
| store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32 |
| store <16 x i16> %strided.vec2, <16 x i16>* %out.vec2, align 32 |
| store <16 x i16> %strided.vec3, <16 x i16>* %out.vec3, align 32 |
| |
| ret void |
| } |