; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved loads.
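;
; For illustration, the IR in each test below roughly corresponds to what
; the vectorizer emits for a scalar loop of this shape (a sketch, not taken
; from any particular source program):
;
;   for (int i = 0; i != n; ++i) {
;     out0[i] = in[4*i + 0];
;     out1[i] = in[4*i + 1];
;     out2[i] = in[4*i + 2];
;     out3[i] = in[4*i + 3];
;   }
;
; i.e. one wide load covering a group of iterations, split by shufflevector
; into the four stride-4 subsequences.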
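; vf2: a single <8 x i32> load split into four <2 x i32> stride-4 slices.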
define void @load_i32_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i32_stride4_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: movq %xmm2, (%rsi)
; SSE-NEXT: movq %xmm3, (%rdx)
; SSE-NEXT: movq %xmm0, (%rcx)
; SSE-NEXT: movq %xmm1, (%r8)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride4_vf2:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3],xmm3[4,5,6,7]
; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-ONLY-NEXT: vmovq %xmm2, (%rsi)
; AVX1-ONLY-NEXT: vmovq %xmm3, (%rdx)
; AVX1-ONLY-NEXT: vmovq %xmm0, (%rcx)
; AVX1-ONLY-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride4_vf2:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3]
; AVX2-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-ONLY-NEXT: vmovq %xmm2, (%rsi)
; AVX2-ONLY-NEXT: vmovq %xmm3, (%rdx)
; AVX2-ONLY-NEXT: vmovq %xmm0, (%rcx)
; AVX2-ONLY-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX2-ONLY-NEXT: retq
;
; AVX512F-SLOW-LABEL: load_i32_stride4_vf2:
; AVX512F-SLOW: # %bb.0:
; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512F-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3]
; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-SLOW-NEXT: vmovq %xmm2, (%rsi)
; AVX512F-SLOW-NEXT: vmovq %xmm3, (%rdx)
; AVX512F-SLOW-NEXT: vmovq %xmm0, (%rcx)
; AVX512F-SLOW-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX512F-SLOW-NEXT: retq
;
; AVX512F-FAST-LABEL: load_i32_stride4_vf2:
; AVX512F-FAST: # %bb.0:
; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512F-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512F-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,5,1,5]
; AVX512F-FAST-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512F-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-FAST-NEXT: vmovq %xmm2, (%rsi)
; AVX512F-FAST-NEXT: vmovq %xmm3, (%rdx)
; AVX512F-FAST-NEXT: vmovq %xmm0, (%rcx)
; AVX512F-FAST-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX512F-FAST-NEXT: retq
;
; AVX512BW-SLOW-LABEL: load_i32_stride4_vf2:
; AVX512BW-SLOW: # %bb.0:
; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512BW-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3]
; AVX512BW-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512BW-SLOW-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-SLOW-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-SLOW-NEXT: vmovq %xmm0, (%rcx)
; AVX512BW-SLOW-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX512BW-SLOW-NEXT: retq
;
; AVX512BW-FAST-LABEL: load_i32_stride4_vf2:
; AVX512BW-FAST: # %bb.0:
; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512BW-FAST-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512BW-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,5,1,5]
; AVX512BW-FAST-NEXT: vpermi2d %xmm1, %xmm0, %xmm3
; AVX512BW-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512BW-FAST-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-FAST-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-FAST-NEXT: vmovq %xmm0, (%rcx)
; AVX512BW-FAST-NEXT: vpextrq $1, %xmm0, (%r8)
; AVX512BW-FAST-NEXT: retq
  %wide.vec = load <8 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> <i32 0, i32 4>
  %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> <i32 1, i32 5>
  %strided.vec2 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> <i32 2, i32 6>
  %strided.vec3 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> <i32 3, i32 7>
  store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <2 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <2 x i32> %strided.vec3, ptr %out.vec3, align 64
  ret void
}

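; vf4: a <16 x i32> load split into four <4 x i32> stride-4 slices.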
define void @load_i32_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i32_stride4_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
; SSE-NEXT: movaps 48(%rdi), %xmm3
; SSE-NEXT: movaps %xmm2, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
; SSE-NEXT: movaps %xmm5, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm4[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm6, (%rsi)
; SSE-NEXT: movaps %xmm5, (%rdx)
; SSE-NEXT: movaps %xmm1, (%rcx)
; SSE-NEXT: movaps %xmm0, (%r8)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride4_vf4:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm3
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm4
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,0]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm6 = xmm3[1],xmm4[1],zero,zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm6 = zero,zero,xmm0[2],xmm1[2]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm4[3,0],xmm3[3,0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,0],xmm0[2,3]
; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsi)
; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rdx)
; AVX1-ONLY-NEXT: vmovaps %xmm6, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %xmm0, (%r8)
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride4_vf4:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm0 = [0,4,0,4]
; AVX2-ONLY-NEXT: # xmm0 = mem[0,0]
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm0, %ymm0
; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm3
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm4
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3]
; AVX2-ONLY-NEXT: vmovaps 48(%rdi), %xmm5
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [1,5,1,5]
; AVX2-ONLY-NEXT: # xmm7 = mem[0,0]
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm8
; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [2,6,2,6]
; AVX2-ONLY-NEXT: # xmm7 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm7, %ymm1
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [3,7,3,7]
; AVX2-ONLY-NEXT: # xmm3 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm3, %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
; AVX2-ONLY-NEXT: vmovaps %xmm0, (%rsi)
; AVX2-ONLY-NEXT: vmovaps %xmm6, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %xmm2, (%r8)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i32_stride4_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [0,4,8,12]
; AVX512-NEXT: vmovdqa (%rdi), %ymm1
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,5,9,13]
; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,6,10,14]
; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm4
; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,7,11,15]
; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm5
; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
; AVX512-NEXT: vmovdqa %xmm3, (%rdx)
; AVX512-NEXT: vmovdqa %xmm4, (%rcx)
; AVX512-NEXT: vmovdqa %xmm5, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %wide.vec = load <16 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.vec1 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.vec2 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.vec3 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <4 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <4 x i32> %strided.vec3, ptr %out.vec3, align 64
  ret void
}

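; vf8: a <32 x i32> load split into four <8 x i32> stride-4 slices.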
define void @load_i32_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i32_stride4_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: movaps 32(%rdi), %xmm2
; SSE-NEXT: movaps 48(%rdi), %xmm4
; SSE-NEXT: movaps 80(%rdi), %xmm5
; SSE-NEXT: movaps 64(%rdi), %xmm1
; SSE-NEXT: movaps 112(%rdi), %xmm6
; SSE-NEXT: movaps 96(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, %xmm8
; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; SSE-NEXT: movaps %xmm1, %xmm9
; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
; SSE-NEXT: movaps %xmm9, %xmm10
; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm8[0]
; SSE-NEXT: movaps %xmm2, %xmm11
; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
; SSE-NEXT: movaps %xmm0, %xmm12
; SSE-NEXT: unpcklps {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
; SSE-NEXT: movaps %xmm12, %xmm13
; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm11[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm8[1]
; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm11[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm7[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm10, 16(%rsi)
; SSE-NEXT: movaps %xmm13, (%rsi)
; SSE-NEXT: movaps %xmm9, 16(%rdx)
; SSE-NEXT: movaps %xmm12, (%rdx)
; SSE-NEXT: movaps %xmm5, 16(%rcx)
; SSE-NEXT: movaps %xmm3, (%rcx)
; SSE-NEXT: movaps %xmm1, 16(%r8)
; SSE-NEXT: movaps %xmm0, (%r8)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride4_vf8:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[4],ymm0[4],ymm4[5],ymm0[5]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,0],ymm5[4,5],ymm3[6,4]
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm5
; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm6
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm8
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm9
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm10 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm0[1,0],ymm4[1,0],ymm0[5,4],ymm4[5,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm10[2,0],ymm7[2,3],ymm10[6,4],ymm7[6,7]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm10 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm11 = xmm8[1],xmm9[1],zero,zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm11 = ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[6],ymm0[6],ymm4[7],ymm0[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,0],ymm11[4,5],ymm10[6,4]
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm11 = zero,zero,xmm5[2],xmm6[2]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm12 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm4[3,0],ymm0[7,4],ymm4[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,3],ymm0[6,4],ymm1[6,7]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm2 = xmm9[3,0],xmm8[3,0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi)
; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rdx)
; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r8)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride4_vf8:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm2
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm3 = [0,4,0,4,0,4,0,4]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm3, %ymm5
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm3, %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [0,4,0,4]
; AVX2-ONLY-NEXT: # xmm5 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm5, %ymm6
; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm7
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm8
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm5
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm9 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm6 = [1,5,1,5,1,5,1,5]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm9
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm6
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm9[6,7]
; AVX2-ONLY-NEXT: vmovaps 48(%rdi), %xmm9
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm10 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [1,5,1,5]
; AVX2-ONLY-NEXT: # xmm11 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm11, %ymm11
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm10 = [2,6,2,6,2,6,2,6]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm10, %ymm11
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm10, %ymm10
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [2,6,2,6]
; AVX2-ONLY-NEXT: # xmm11 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm11, %ymm4
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1],xmm4[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm7 = [3,7,3,7,3,7,3,7]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm7, %ymm2
; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm7, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm2 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [3,7,3,7]
; AVX2-ONLY-NEXT: # xmm5 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm5, %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm0, (%r8)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i32_stride4_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [0,4,8,12,16,20,24,28]
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm2
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,5,9,13,17,21,25,29]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,6,10,14,18,22,26,30]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [3,7,11,15,19,23,27,31]
; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm5
; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512-NEXT: vmovdqa %ymm5, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %wide.vec = load <32 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %strided.vec1 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %strided.vec2 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %strided.vec3 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <8 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <8 x i32> %strided.vec3, ptr %out.vec3, align 64
  ret void
}

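; vf16: a <64 x i32> load split into four <16 x i32> stride-4 slices.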
define void @load_i32_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i32_stride4_vf16:
; SSE: # %bb.0:
; SSE-NEXT: subq $40, %rsp
; SSE-NEXT: movaps 208(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 240(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 224(%rdi), %xmm1
; SSE-NEXT: movaps 144(%rdi), %xmm11
; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 128(%rdi), %xmm14
; SSE-NEXT: movaps 176(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 160(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 80(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 64(%rdi), %xmm10
; SSE-NEXT: movaps 112(%rdi), %xmm4
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 96(%rdi), %xmm8
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE-NEXT: movaps %xmm10, %xmm5
; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; SSE-NEXT: movaps %xmm5, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm2, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; SSE-NEXT: movaps %xmm14, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm6, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
; SSE-NEXT: movaps 192(%rdi), %xmm12
; SSE-NEXT: movaps %xmm12, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE-NEXT: movaps %xmm4, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT: movaps 32(%rdi), %xmm2
; SSE-NEXT: movaps 48(%rdi), %xmm15
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
; SSE-NEXT: movaps (%rdi), %xmm11
; SSE-NEXT: movaps 16(%rdi), %xmm9
; SSE-NEXT: movaps %xmm11, %xmm13
; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
; SSE-NEXT: movaps %xmm13, %xmm7
; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm1[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
; SSE-NEXT: # xmm10 = xmm10[2],mem[2],xmm10[3],mem[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
; SSE-NEXT: # xmm12 = xmm12[2],mem[2],xmm12[3],mem[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm15[2],xmm2[3],xmm15[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; SSE-NEXT: movaps %xmm10, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm8[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm8[1]
; SSE-NEXT: movaps %xmm14, %xmm8
; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1]
; SSE-NEXT: movaps %xmm12, %xmm9
; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm3[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1]
; SSE-NEXT: movaps %xmm11, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm2[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 48(%rsi)
; SSE-NEXT: movaps %xmm7, (%rsi)
; SSE-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 16(%rsi)
; SSE-NEXT: movaps %xmm4, 48(%rdx)
; SSE-NEXT: movaps %xmm13, (%rdx)
; SSE-NEXT: movaps %xmm6, 32(%rdx)
; SSE-NEXT: movaps %xmm5, 16(%rdx)
; SSE-NEXT: movaps %xmm9, 48(%rcx)
; SSE-NEXT: movaps %xmm8, 32(%rcx)
; SSE-NEXT: movaps %xmm1, 16(%rcx)
; SSE-NEXT: movaps %xmm0, (%rcx)
; SSE-NEXT: movaps %xmm12, 48(%r8)
; SSE-NEXT: movaps %xmm14, 32(%r8)
; SSE-NEXT: movaps %xmm10, 16(%r8)
; SSE-NEXT: movaps %xmm11, (%r8)
; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i32_stride4_vf16:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: subq $264, %rsp # imm = 0x108
; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm5
; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm4
; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm3
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm1[0],ymm3[2],ymm1[2]
; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm14
; AVX1-ONLY-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm15
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm7 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm10
; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm3
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,0],ymm7[4,5],ymm0[6,4]
; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm6
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm11 = xmm6[0],xmm1[0]
; AVX1-ONLY-NEXT: vmovaps %xmm6, %xmm2
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 144(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm6
; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm12 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm4[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
; AVX1-ONLY-NEXT: vmovaps %ymm6, %ymm8
; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm5[2,3,0,1]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm13 = ymm9[0],ymm5[0],ymm9[1],ymm5[1],ymm9[4],ymm5[4],ymm9[5],ymm5[5]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm13[0,1],ymm0[2,0],ymm13[4,5],ymm0[6,4]
; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm11
; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm12
; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm12[0],xmm11[0]
; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm4
; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm5
; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm6[0,1],xmm0[2,0]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps %ymm15, %ymm4
; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm7 = ymm15[0],ymm14[0],ymm15[1],ymm14[1],ymm15[4],ymm14[4],ymm15[5],ymm14[5]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm10[1,0],ymm3[1,0],ymm10[5,4],ymm3[5,4]
; AVX1-ONLY-NEXT: vmovaps %ymm10, %ymm15
; AVX1-ONLY-NEXT: vmovaps %ymm3, %ymm13
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[2,0],ymm7[2,3],ymm6[6,4],ymm7[6,7]
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm6 = xmm5[1],xmm1[1],zero,zero
; AVX1-ONLY-NEXT: vmovaps %xmm1, %xmm14
; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm3[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm8, %ymm10
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm8[0],ymm0[0],ymm8[1],ymm0[1],ymm8[4],ymm0[4],ymm8[5],ymm0[5]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm2[1,0],ymm9[1,0],ymm2[5,4],ymm9[5,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm6[2,0],ymm3[2,3],ymm6[6,4],ymm3[6,7]
; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm7 # 16-byte Folded Reload
; AVX1-ONLY-NEXT: # xmm7 = mem[0],xmm8[1],zero,zero
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm4[1],ymm3[3],ymm4[3]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[6],ymm15[6],ymm13[7],ymm15[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm6[0,1],ymm3[2,0],ymm6[4,5],ymm3[6,4]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm6 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm7 = zero,zero,xmm1[2],xmm3[2]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps %ymm10, %ymm14
; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm10[1],ymm0[3],ymm10[3]
; AVX1-ONLY-NEXT: vmovaps %ymm0, %ymm5
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm7 = ymm9[2],ymm2[2],ymm9[3],ymm2[3],ymm9[6],ymm2[6],ymm9[7],ymm2[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
; AVX1-ONLY-NEXT: vmovaps %xmm11, %xmm10
; AVX1-ONLY-NEXT: vmovaps %xmm12, %xmm11
; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm7 = zero,zero,xmm10[2],xmm12[2]
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm12[2],xmm8[2],xmm12[3],xmm8[3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-ONLY-NEXT: vunpckhps (%rsp), %ymm2, %ymm4 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm4 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm15[3,0],ymm13[3,0],ymm15[7,4],ymm13[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm4[2,3],ymm2[6,4],ymm4[6,7]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm4 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
; AVX1-ONLY-NEXT: # xmm6 = xmm1[3,0],mem[3,0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,0],xmm4[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm3 = ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[6],ymm5[6],ymm14[7],ymm5[7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm1[3,0],ymm9[3,0],ymm1[7,4],ymm9[7,4]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm8[3,0],xmm12[3,0]
; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[2,3]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%r8)
; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r8)
; AVX1-ONLY-NEXT: addq $264, %rsp # imm = 0x108
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i32_stride4_vf16:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: subq $104, %rsp
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm8
; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm7
; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm5
; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm12
; AVX2-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm2
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [0,4,0,4,0,4,0,4]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm0, %ymm6
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 144(%rdi), %xmm10
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm11
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm9 = [0,4,0,4]
; AVX2-ONLY-NEXT: # xmm9 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm12, %ymm9, %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm12[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm9, %ymm1
; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm12
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm13
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1,5,1,5,1,5,1,5]
; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm6
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm15
; AVX2-ONLY-NEXT: vmovaps 48(%rdi), %xmm6
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm9 = xmm15[0],xmm6[0],xmm15[1],xmm6[1]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [1,5,1,5]
; AVX2-ONLY-NEXT: # xmm7 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm14
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm14[0,1],xmm9[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm0, %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm14
; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vmovaps 176(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm0
; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm7 = [2,6,2,6,2,6,2,6]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm7, %ymm8
; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm7, %ymm9
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm9 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm10 = [2,6,2,6]
; AVX2-ONLY-NEXT: # xmm10 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm11[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm7, %ymm9
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm7, %ymm7
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm9[6,7]
; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm9 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm10 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm9 = [3,7,3,7,3,7,3,7]
; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm9, %ymm5
; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm9, %ymm4
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm5 = xmm15[2],xmm6[2],xmm15[3],xmm6[3]
; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [3,7,3,7]
; AVX2-ONLY-NEXT: # xmm6 = mem[0,0]
; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm10 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm10[0,1],xmm5[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm9, %ymm2
; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm9, %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm6, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm8, 32(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm7, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-ONLY-NEXT: vmovaps %ymm4, (%r8)
; AVX2-ONLY-NEXT: addq $104, %rsp
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i32_stride4_vf16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28]
; AVX512-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm5
; AVX512-NEXT: vpermt2d %zmm3, %zmm4, %zmm5
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29]
; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm6
; AVX512-NEXT: vpermt2d %zmm3, %zmm5, %zmm6
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30]
; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm7
; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm7
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31]
; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermt2d %zmm3, %zmm7, %zmm2
; AVX512-NEXT: vpermt2d %zmm1, %zmm7, %zmm0
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm6, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm0, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %wide.vec = load <64 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  %strided.vec1 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  %strided.vec2 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  %strided.vec3 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
  store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
  store <16 x i32> %strided.vec2, ptr %out.vec2, align 64
  store <16 x i32> %strided.vec3, ptr %out.vec3, align 64
  ret void
}

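; vf32: a <128 x i32> load split into four <32 x i32> stride-4 slices.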
define void @load_i32_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i32_stride4_vf32:
; SSE: # %bb.0:
; SSE-NEXT: subq $440, %rsp # imm = 0x1B8
; SSE-NEXT: movaps 272(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 304(%rdi), %xmm8
; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 288(%rdi), %xmm5
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 336(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 320(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps 368(%rdi), %xmm10
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 352(%rdi), %xmm4
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 80(%rdi), %xmm11
; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 64(%rdi), %xmm1
; SSE-NEXT: movaps 112(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 96(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movaps %xmm1, %xmm14
; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
; SSE-NEXT: movaps %xmm2, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
; SSE-NEXT: movaps 256(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 240(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 224(%rdi), %xmm12
; SSE-NEXT: movaps %xmm12, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps 208(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 192(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 496(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 480(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps 464(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 448(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, %xmm15
; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
; SSE-NEXT: movaps %xmm15, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
; SSE-NEXT: movaps 176(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 160(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps 144(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 128(%rdi), %xmm13
; SSE-NEXT: movaps %xmm13, %xmm11
; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
; SSE-NEXT: movaps %xmm11, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm1[1]
; SSE-NEXT: movaps 432(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 416(%rdi), %xmm8
; SSE-NEXT: movaps %xmm8, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps 400(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 384(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, %xmm7
; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm1[1]
; SSE-NEXT: movaps 32(%rdi), %xmm5
; SSE-NEXT: movaps 48(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps (%rdi), %xmm10
; SSE-NEXT: movaps 16(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm10, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3]
; SSE-NEXT: movaps %xmm14, %xmm0
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
; SSE-NEXT: # xmm12 = xmm12[2],mem[2],xmm12[3],mem[3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = xmm14[2],mem[2],xmm14[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: # xmm13 = xmm13[2],mem[2],xmm13[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
; SSE-NEXT: # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; SSE-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
; SSE-NEXT: # xmm10 = xmm10[2],mem[2],xmm10[3],mem[3]
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm3[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm12[0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm12[1]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, %xmm12
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm9, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm8[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm8[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm14[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm14[1]
; SSE-NEXT: movaps %xmm3, %xmm8
; SSE-NEXT: movaps %xmm10, %xmm14
; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm5[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm5[1]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
| ; SSE-NEXT: movaps %xmm0, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 16(%rsi) |
| ; SSE-NEXT: movaps %xmm7, 96(%rdx) |
| ; SSE-NEXT: movaps %xmm11, 32(%rdx) |
| ; SSE-NEXT: movaps %xmm15, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rdx) |
| ; SSE-NEXT: movaps %xmm2, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps %xmm6, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps %xmm4, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps %xmm12, 64(%rcx) |
| ; SSE-NEXT: movaps %xmm14, (%rcx) |
| ; SSE-NEXT: movaps %xmm1, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm8, 112(%r8) |
| ; SSE-NEXT: movaps %xmm9, 96(%r8) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps %xmm13, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps %xmm10, (%r8) |
| ; SSE-NEXT: addq $440, %rsp # imm = 0x1B8 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i32_stride4_vf32: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $1000, %rsp # imm = 0x3E8 |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm5[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm13[0],ymm5[2],ymm13[2] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm2[0],ymm5[1],ymm2[1],ymm5[4],ymm2[4],ymm5[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm7, %xmm9 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 432(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, %ymm15 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm12 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,0],ymm3[4,5],ymm2[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps 144(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm8 = xmm7[0],xmm4[0],xmm7[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm8[0,1],xmm3[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm10 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,1],ymm8[2,0],ymm10[4,5],ymm8[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm14 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm13[0],ymm6[0],ymm13[1],ymm6[1],ymm13[4],ymm6[4],ymm13[5],ymm6[5] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm10[1,0],ymm5[5,4],ymm10[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = xmm9[1],xmm11[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm14 = xmm6[0],xmm9[0],xmm6[1],xmm9[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,0],ymm15[1,0],ymm12[5,4],ymm15[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[1],xmm4[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm14 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm14 = xmm2[0],mem[0],xmm2[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm14 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm3[1,0],ymm1[5,4],ymm3[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm1[2,0],ymm14[2,3],ymm1[6,4],ymm14[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm14 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm14 = xmm4[0],mem[0],xmm4[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm8 = ymm12[0],ymm7[0],ymm12[1],ymm7[1],ymm12[4],ymm7[4],ymm12[5],ymm7[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm15[1,0],ymm1[5,4],ymm15[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm1[2,0],ymm8[2,3],ymm1[6,4],ymm8[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm14 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm14 = mem[0],xmm14[1],zero,zero |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm1[1],ymm13[1],ymm1[3],ymm13[3] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm5[2],ymm10[3],ymm5[3],ymm10[6],ymm5[6],ymm10[7],ymm5[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm8[2,0],ymm1[4,5],ymm8[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3] |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm6[2],xmm9[2] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm2[1],ymm0[1],ymm2[3],ymm0[3] |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm8[2,0],ymm1[4,5],ymm8[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm14 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm14 = zero,zero,xmm4[2],mem[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm4[1],ymm6[1],ymm4[3],ymm6[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[6],ymm5[6],ymm8[7],ymm5[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm11[2],xmm10[2],xmm11[3],xmm10[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm2[2],xmm3[2] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm12[1],ymm7[3],ymm12[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm15[2],ymm13[2],ymm15[3],ymm13[3],ymm15[6],ymm13[6],ymm15[7],ymm13[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = zero,zero,xmm7[2],mem[0] |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm14 = xmm12[2],xmm15[2],xmm12[3],xmm15[3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm9[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm9[2,0],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[6],ymm4[6],ymm6[7],ymm4[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm5[3,0],ymm8[3,0],ymm5[7,4],ymm8[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm4[2,0],ymm1[2,3],ymm4[6,4],ymm1[6,7] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm10[3,0],xmm11[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm4 = xmm5[2,0],xmm4[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm4 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm5 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,0],ymm4[2,3],ymm5[6,4],ymm4[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm6 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm6 = xmm2[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm6[2,0],xmm5[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm3 = ymm13[3,0],mem[3,0],ymm13[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm3[2,0],ymm2[2,3],ymm3[6,4],ymm2[6,7] |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm3 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm15[3,0],xmm12[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm3 = xmm5[2,0],xmm3[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm14, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r8) |
| ; AVX1-ONLY-NEXT: addq $1000, %rsp # imm = 0x3E8 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i32_stride4_vf32: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $712, %rsp # imm = 0x2C8 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [0,4,0,4,0,4,0,4] |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, %ymm10 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, %ymm9 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 272(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [0,4,0,4] |
| ; AVX2-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm3, %ymm4 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm3, %ymm4 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 144(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm3, %ymm4 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm3, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm3 = [1,5,1,5,1,5,1,5] |
| ; AVX2-ONLY-NEXT: vpermps %ymm10, %ymm3, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm3, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm9, %ymm4 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [1,5,1,5] |
| ; AVX2-ONLY-NEXT: # xmm6 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm6, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm3, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm3, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vmovaps 48(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm10 = xmm12[0],xmm5[0],xmm12[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm6, %ymm11 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm3, %ymm9 |
| ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm3, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm9[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 176(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm13 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm13[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm3, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm3, %ymm3 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm6, %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 432(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm11 = xmm9[0],xmm3[0],xmm9[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm13[0,1],xmm11[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [2,6,2,6,2,6,2,6] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm0, %ymm11 |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm13 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5],ymm11[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm13 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm13 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm3 = [2,6,2,6] |
| ; AVX2-ONLY-NEXT: # xmm3 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm13[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm0, %ymm11 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm11 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm13[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm11[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm0, %ymm11 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm11 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm13[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm10 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm10 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm10 = [3,7,3,7,3,7,3,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm10, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm10, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm12[2],xmm5[2],xmm12[3],xmm5[3] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [3,7,3,7] |
| ; AVX2-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm10, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm10, %ymm5 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm5 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm10, %ymm5 |
| ; AVX2-ONLY-NEXT: vpermps %ymm7, %ymm10, %ymm6 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm6 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm6 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm3 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm3 = xmm9[2],mem[2],xmm9[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm11, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm13, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm5, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%r8) |
| ; AVX2-ONLY-NEXT: addq $712, %rsp # imm = 0x2C8 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i32_stride4_vf32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2 |
| ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3 |
| ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm4 |
| ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm6 |
| ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm7 |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28] |
| ; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm9 |
| ; AVX512-NEXT: vpermt2d %zmm6, %zmm8, %zmm9 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm10 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm8, %zmm10 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm10 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm8, %zmm10 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm8 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm10[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29] |
| ; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm11 |
| ; AVX512-NEXT: vpermt2d %zmm6, %zmm10, %zmm11 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm12 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm10, %zmm12 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm12[0,1,2,3],zmm11[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm12 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm10, %zmm12 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm10 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30] |
| ; AVX512-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm13 |
| ; AVX512-NEXT: vpermt2d %zmm6, %zmm12, %zmm13 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm14 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm12, %zmm14 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm13[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm14 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm12, %zmm14 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm12 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm14[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31] |
| ; AVX512-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vpermt2d %zmm6, %zmm14, %zmm7 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm14, %zmm5 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7] |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm14, %zmm2 |
| ; AVX512-NEXT: vpermt2d %zmm1, %zmm14, %zmm0 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm8, (%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm11, 64(%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm10, (%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm12, (%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm4, 64(%r8) |
| ; AVX512-NEXT: vmovdqa64 %zmm0, (%r8) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %wide.vec = load <128 x i32>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124> |
| %strided.vec1 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125> |
| %strided.vec2 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <32 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126> |
| %strided.vec3 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <32 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127> |
| store <32 x i32> %strided.vec0, ptr %out.vec0, align 64 |
| store <32 x i32> %strided.vec1, ptr %out.vec1, align 64 |
| store <32 x i32> %strided.vec2, ptr %out.vec2, align 64 |
| store <32 x i32> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
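| ; Codegen strategies for the vf32 case above: SSE deinterleaves with |
| ; unpcklps/unpckhps plus movlhps/unpckhpd and spills heavily to the stack; |
| ; AVX1 first swaps 128-bit halves with vperm2f128 so the unpacks line up; |
| ; AVX2 gathers each result directly with vpermps and broadcast index |
| ; vectors [k,k+4,k,k+4,...]; AVX512 needs only vpermt2d with 16-element |
| ; index vectors followed by vshufi64x2, with no stack traffic. |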
| |
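| ; The vf64 variant below applies the same stride-4 deinterleave at twice |
| ; the width: element i of %out.veck is element 4*i+k of the <256 x i32> |
| ; input loaded from %in.vec. |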
| define void @load_i32_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind { |
| ; SSE-LABEL: load_i32_stride4_vf64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $1224, %rsp # imm = 0x4C8 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 176(%rdi), %xmm5 |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 160(%rdi), %xmm6 |
| ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 208(%rdi), %xmm7 |
| ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 192(%rdi), %xmm8 |
| ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 240(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 224(%rdi), %xmm3 |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 80(%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 64(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 112(%rdi), %xmm12 |
| ; SSE-NEXT: movaps 96(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1] |
| ; SSE-NEXT: movaps %xmm2, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm3, %xmm1 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1] |
| ; SSE-NEXT: movaps %xmm8, %xmm3 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm3, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm6, %xmm0 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] |
| ; SSE-NEXT: movaps 128(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, %xmm1 |
| ; SSE-NEXT: movaps %xmm2, %xmm3 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm4, %xmm8 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 368(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 352(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 336(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 320(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 304(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 288(%rdi), %xmm7 |
| ; SSE-NEXT: movaps %xmm7, %xmm0 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] |
| ; SSE-NEXT: movaps 272(%rdi), %xmm4 |
| ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 256(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, %xmm1 |
| ; SSE-NEXT: movaps %xmm2, %xmm11 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 496(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 480(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 464(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 448(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 432(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 416(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 400(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 384(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 624(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 608(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 592(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 576(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 560(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 544(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 528(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 512(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 752(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 736(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 720(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 704(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 688(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 672(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 656(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 640(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 880(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 864(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 848(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 832(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 816(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 800(%rdi), %xmm13 |
| ; SSE-NEXT: movaps %xmm13, %xmm0 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 784(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 768(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1008(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 992(%rdi), %xmm15 |
| ; SSE-NEXT: movaps %xmm15, %xmm0 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps 976(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 960(%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm14, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1] |
| ; SSE-NEXT: movaps 944(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 928(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, %xmm2 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] |
| ; SSE-NEXT: movaps 912(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 896(%rdi), %xmm5 |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm5, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm2[1] |
| ; SSE-NEXT: movaps 32(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm6, %xmm4 |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] |
| ; SSE-NEXT: movaps (%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 16(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm10, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm4[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm12 = xmm12[2],mem[2],xmm12[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3] |
| ; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm8[2],xmm3[3],xmm8[3] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3] |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3] |
| ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3] |
| ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm11 = xmm11[2],mem[2],xmm11[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm13 = xmm13[2],mem[2],xmm13[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm15 = xmm15[2],mem[2],xmm15[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload |
| ; SSE-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, %xmm12 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm8, %xmm1 |
| ; SSE-NEXT: movaps %xmm8, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm3, %xmm2 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm2 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm11, %xmm0 |
| ; SSE-NEXT: movaps %xmm11, %xmm12 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm11 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm7 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm4 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, %xmm1 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm13[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm13[1] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, %xmm13 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm2[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, %xmm8 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm9[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm9[1] |
| ; SSE-NEXT: movaps %xmm2, %xmm9 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm15[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm15[1] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm15, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm6[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm6[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 224(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 160(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 240(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 176(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 192(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 128(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 208(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 144(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 16(%rsi) |
| ; SSE-NEXT: movaps %xmm5, 224(%rdx) |
| ; SSE-NEXT: movaps %xmm14, 240(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 192(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 208(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 160(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 176(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 128(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 144(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 48(%rdx) |
| ; SSE-NEXT: movaps %xmm10, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm5, 16(%rdx) |
| ; SSE-NEXT: movaps %xmm3, 240(%rcx) |
| ; SSE-NEXT: movaps %xmm8, 224(%rcx) |
| ; SSE-NEXT: movaps %xmm13, 208(%rcx) |
| ; SSE-NEXT: movaps %xmm0, 192(%rcx) |
| ; SSE-NEXT: movaps %xmm1, 176(%rcx) |
| ; SSE-NEXT: movaps %xmm4, 160(%rcx) |
| ; SSE-NEXT: movaps %xmm7, 144(%rcx) |
| ; SSE-NEXT: movaps %xmm11, 128(%rcx) |
| ; SSE-NEXT: movaps %xmm12, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm2, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%r8) |
| ; SSE-NEXT: movaps %xmm9, 224(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r8) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps %xmm15, (%r8) |
| ; SSE-NEXT: addq $1224, %rsp # imm = 0x4C8 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i32_stride4_vf64: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $2200, %rsp # imm = 0x898 |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm1[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm14[0],ymm1[2],ymm14[2] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm8 |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm9[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm9[0],ymm5[1],ymm9[1],ymm5[4],ymm9[4],ymm5[5],ymm9[5] |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm11[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 144(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 432(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, %xmm6 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 656(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 944(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 912(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 560(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 528(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm12 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm12[0,1],xmm1[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm1[0],ymm2[0],ymm1[2],ymm2[2] |
| ; AVX1-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm13 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,1],ymm12[2,0],ymm13[4,5],ymm12[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm13 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm0 = xmm13[0,1],xmm0[2,0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm8[0],ymm14[1],ymm8[1],ymm14[4],ymm8[4],ymm14[5],ymm8[5] |
| ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm9[1,0],ymm7[1,0],ymm9[5,4],ymm7[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, %xmm7 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[1],xmm5[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps %xmm10, %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm13 = xmm10[0],xmm11[0],xmm10[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm4[0],ymm8[0],ymm4[1],ymm8[1],ymm4[4],ymm8[4],ymm4[5],ymm8[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,0],ymm5[1,0],ymm10[5,4],ymm5[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm1 = xmm6[1],xmm15[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{.*#+}} xmm13 = xmm6[0],xmm12[0],xmm6[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0],xmm1[1],zero,zero |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = mem[0],xmm13[1],zero,zero |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm2[1],ymm14[1],ymm2[3],ymm14[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm14[2],ymm9[2],ymm14[3],ymm9[3],ymm14[6],ymm9[6],ymm14[7],ymm9[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm13 = zero,zero,xmm3[2],xmm11[2] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm8[1],ymm4[1],ymm8[3],ymm4[3] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm5[2],ymm10[2],ymm5[3],ymm10[3],ymm5[6],ymm10[6],ymm5[7],ymm10[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm1[2],xmm15[2],xmm1[3],xmm15[3] |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm13 = zero,zero,xmm6[2],xmm12[2] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm4[1],ymm11[1],ymm4[3],ymm11[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm5[2],ymm9[2],ymm5[3],ymm9[3],ymm5[6],ymm9[6],ymm5[7],ymm9[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm15[2],xmm12[2],xmm15[3],xmm12[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps {{.*#+}} xmm13 = zero,zero,xmm2[2],xmm3[2] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm6[1],ymm8[1],ymm6[3],ymm8[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[6],ymm10[6],ymm7[7],ymm10[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = zero,zero,xmm13[2],mem[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = zero,zero,xmm13[2],mem[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps (%rsp), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = zero,zero,xmm13[2],mem[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = zero,zero,xmm13[2],mem[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = zero,zero,xmm1[2],mem[0] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[2],mem[2],xmm13[3],mem[3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm14[3,0],ymm1[7,4],ymm14[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm13[2,0],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = xmm13[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm13[2,0],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm9[3,0],ymm5[3,0],ymm9[7,4],ymm5[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm13 = xmm12[3,0],xmm15[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm1 = xmm13[2,0],xmm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhps {{.*#+}} ymm1 = ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[6],ymm6[6],ymm8[7],ymm6[7] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[3,0],ymm7[3,0],ymm10[7,4],ymm7[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm10[2,0],ymm1[2,3],ymm10[6,4],ymm1[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm11 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm11 = xmm2[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm10 = xmm11[2,0],xmm10[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm6 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm9 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm9 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm9[2,0],ymm6[2,3],ymm9[6,4],ymm6[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm2[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm9 = xmm10[2,0],xmm9[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm4 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm8 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm8 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm8[2,0],ymm4[2,3],ymm8[6,4],ymm4[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm2[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm8 = xmm9[2,0],xmm8[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm7 = ymm3[3,0],mem[3,0],ymm3[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm7[2,0],ymm2[2,3],ymm7[6,4],ymm2[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm3[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm7 = xmm8[2,0],xmm7[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm3 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm5 = ymm5[3,0],mem[3,0],ymm5[7,4],mem[7,4] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm3[2,3],ymm5[6,4],ymm3[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm7[3,0],mem[3,0] |
| ; AVX1-ONLY-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,0],xmm5[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 192(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm6, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm14, 32(%r8) |
| ; AVX1-ONLY-NEXT: addq $2200, %rsp # imm = 0x898 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
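| ; The AVX2 path below swaps the AVX1 unpack/shuffle chains for vpermps through broadcast index vectors ([0,4,...], [1,5,...], [2,6,...], [3,7,...]), one per output stream, with xmm unpcks and blends supplying the low 128 bits. |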
| ; AVX2-ONLY-LABEL: load_i32_stride4_vf64: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $1960, %rsp # imm = 0x7A8 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm6 = [0,4,0,4,0,4,0,4] |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm5 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 144(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [0,4,0,4] |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 656(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm0, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, %ymm14 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 912(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm3 |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 272(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm11 |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 528(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm13 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 784(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm7, %ymm2 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, %ymm15 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, %ymm12 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm7, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm6 = xmm2[0],xmm6[0],xmm2[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm6 = [1,5,1,5,1,5,1,5] |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm10, %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm10, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 176(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm7 = [1,5,1,5] |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 432(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 688(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 944(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vpermps %ymm12, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 48(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm10[0],xmm1[0],xmm10[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, %ymm15 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm11, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps %ymm11, %ymm12 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm11, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 560(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm7, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm13, %ymm6, %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm13, %ymm6, %ymm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm1, %ymm7, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 816(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklps {{.*#+}} xmm10 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [2,6,2,6,2,6,2,6] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm2, %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm2 = [2,6,2,6] |
| ; AVX2-ONLY-NEXT: # xmm2 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm3, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm12, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm11, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm11, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps %ymm13, %ymm0, %ymm7 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vbroadcastsd {{.*#+}} ymm0 = [3,7,3,7,3,7,3,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm9, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm1[2],mem[2],xmm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = [3,7,3,7] |
| ; AVX2-ONLY-NEXT: # xmm1 = mem[0,0] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermps %ymm14, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm8, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm9[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vpermps %ymm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps (%rsp), %xmm3, %xmm5 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm5 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm6, %ymm0, %ymm2 |
| ; AVX2-ONLY-NEXT: vpermps %ymm4, %ymm0, %ymm3 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm15, %ymm0, %ymm3 |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm12, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = xmm6[2],mem[2],xmm6[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpermps %ymm11, %ymm0, %ymm4 |
| ; AVX2-ONLY-NEXT: vpermps %ymm13, %ymm0, %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 192(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm8, 128(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm5, 224(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm9, 160(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm10, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%r8) |
| ; AVX2-ONLY-NEXT: addq $1960, %rsp # imm = 0x7A8 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
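| ; With 512-bit registers the deinterleave needs no stack spills: vpermt2d gathers each output lane from a pair of 64-byte loads via the broadcast index vectors below, and vshufi64x2 concatenates the two halves into each full zmm result. |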
| ; AVX512-LABEL: load_i32_stride4_vf64: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2 |
| ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3 |
| ; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm4 |
| ; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm5 |
| ; AVX512-NEXT: vmovdqa64 960(%rdi), %zmm10 |
| ; AVX512-NEXT: vmovdqa64 896(%rdi), %zmm7 |
| ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm14 |
| ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm15 |
| ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm17 |
| ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm16 |
| ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm8 |
| ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm9 |
| ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm12 |
| ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm11 |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm19 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28] |
| ; AVX512-NEXT: # zmm19 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm6 |
| ; AVX512-NEXT: vpermt2d %zmm12, %zmm19, %zmm6 |
| ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm13 |
| ; AVX512-NEXT: vpermt2d %zmm8, %zmm19, %zmm13 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm13[0,1,2,3],zmm6[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm16, %zmm13 |
| ; AVX512-NEXT: vpermt2d %zmm17, %zmm19, %zmm13 |
| ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm18 |
| ; AVX512-NEXT: vpermt2d %zmm14, %zmm19, %zmm18 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm18[0,1,2,3],zmm13[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm18 |
| ; AVX512-NEXT: vpermt2d %zmm10, %zmm19, %zmm18 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm20 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm19, %zmm20 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm18 = zmm20[0,1,2,3],zmm18[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm20 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm19, %zmm20 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm19 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],zmm20[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29] |
| ; AVX512-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm20 |
| ; AVX512-NEXT: vpermt2d %zmm12, %zmm21, %zmm20 |
| ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm22 |
| ; AVX512-NEXT: vpermt2d %zmm8, %zmm21, %zmm22 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm20 = zmm22[0,1,2,3],zmm20[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm16, %zmm22 |
| ; AVX512-NEXT: vpermt2d %zmm17, %zmm21, %zmm22 |
| ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512-NEXT: vpermt2d %zmm14, %zmm21, %zmm23 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],zmm22[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm23 |
| ; AVX512-NEXT: vpermt2d %zmm10, %zmm21, %zmm23 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm24 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm21, %zmm24 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm24[0,1,2,3],zmm23[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm24 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm21, %zmm24 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm21 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm24[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm24 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30] |
| ; AVX512-NEXT: # zmm24 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm25 |
| ; AVX512-NEXT: vpermt2d %zmm12, %zmm24, %zmm25 |
| ; AVX512-NEXT: vmovdqa64 %zmm9, %zmm26 |
| ; AVX512-NEXT: vpermt2d %zmm8, %zmm24, %zmm26 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm25 = zmm26[0,1,2,3],zmm25[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm16, %zmm26 |
| ; AVX512-NEXT: vpermt2d %zmm17, %zmm24, %zmm26 |
| ; AVX512-NEXT: vmovdqa64 %zmm15, %zmm27 |
| ; AVX512-NEXT: vpermt2d %zmm14, %zmm24, %zmm27 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm26 = zmm27[0,1,2,3],zmm26[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm27 |
| ; AVX512-NEXT: vpermt2d %zmm10, %zmm24, %zmm27 |
| ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm28 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm24, %zmm28 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm27 = zmm28[0,1,2,3],zmm27[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm28 |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm24, %zmm28 |
| ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm24 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,2,3],zmm28[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm28 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31] |
| ; AVX512-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512-NEXT: vpermt2d %zmm17, %zmm28, %zmm16 |
| ; AVX512-NEXT: vpermt2d %zmm14, %zmm28, %zmm15 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm14 = zmm15[0,1,2,3],zmm16[4,5,6,7] |
| ; AVX512-NEXT: vpermt2d %zmm10, %zmm28, %zmm7 |
| ; AVX512-NEXT: vpermt2d %zmm4, %zmm28, %zmm5 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7] |
| ; AVX512-NEXT: vpermt2d %zmm12, %zmm28, %zmm11 |
| ; AVX512-NEXT: vpermt2d %zmm8, %zmm28, %zmm9 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm9[0,1,2,3],zmm11[4,5,6,7] |
| ; AVX512-NEXT: vpermt2d %zmm3, %zmm28, %zmm2 |
| ; AVX512-NEXT: vpermt2d %zmm1, %zmm28, %zmm0 |
| ; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7] |
| ; AVX512-NEXT: vmovdqa64 %zmm18, 192(%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm19, (%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rsi) |
| ; AVX512-NEXT: vmovdqa64 %zmm23, 192(%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm21, (%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm22, 64(%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm20, 128(%rdx) |
| ; AVX512-NEXT: vmovdqa64 %zmm27, 192(%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm24, (%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm26, 64(%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm25, 128(%rcx) |
| ; AVX512-NEXT: vmovdqa64 %zmm5, 128(%r8) |
| ; AVX512-NEXT: vmovdqa64 %zmm4, 192(%r8) |
| ; AVX512-NEXT: vmovdqa64 %zmm0, (%r8) |
| ; AVX512-NEXT: vmovdqa64 %zmm14, 64(%r8) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
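| ; Each %strided.vecN mask below selects the elements congruent to N mod 4, i.e. one field of the four-way interleave. |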
| %wide.vec = load <256 x i32>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <64 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124, i32 128, i32 132, i32 136, i32 140, i32 144, i32 148, i32 152, i32 156, i32 160, i32 164, i32 168, i32 172, i32 176, i32 180, i32 184, i32 188, i32 192, i32 196, i32 200, i32 204, i32 208, i32 212, i32 216, i32 220, i32 224, i32 228, i32 232, i32 236, i32 240, i32 244, i32 248, i32 252> |
| %strided.vec1 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <64 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125, i32 129, i32 133, i32 137, i32 141, i32 145, i32 149, i32 153, i32 157, i32 161, i32 165, i32 169, i32 173, i32 177, i32 181, i32 185, i32 189, i32 193, i32 197, i32 201, i32 205, i32 209, i32 213, i32 217, i32 221, i32 225, i32 229, i32 233, i32 237, i32 241, i32 245, i32 249, i32 253> |
| %strided.vec2 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <64 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126, i32 130, i32 134, i32 138, i32 142, i32 146, i32 150, i32 154, i32 158, i32 162, i32 166, i32 170, i32 174, i32 178, i32 182, i32 186, i32 190, i32 194, i32 198, i32 202, i32 206, i32 210, i32 214, i32 218, i32 222, i32 226, i32 230, i32 234, i32 238, i32 242, i32 246, i32 250, i32 254> |
| %strided.vec3 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <64 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127, i32 131, i32 135, i32 139, i32 143, i32 147, i32 151, i32 155, i32 159, i32 163, i32 167, i32 171, i32 175, i32 179, i32 183, i32 187, i32 191, i32 195, i32 199, i32 203, i32 207, i32 211, i32 215, i32 219, i32 223, i32 227, i32 231, i32 235, i32 239, i32 243, i32 247, i32 251, i32 255> |
| store <64 x i32> %strided.vec0, ptr %out.vec0, align 64 |
| store <64 x i32> %strided.vec1, ptr %out.vec1, align 64 |
| store <64 x i32> %strided.vec2, ptr %out.vec2, align 64 |
| store <64 x i32> %strided.vec3, ptr %out.vec3, align 64 |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX: {{.*}} |
| ; AVX1: {{.*}} |
| ; AVX2: {{.*}} |
| ; AVX2-FAST: {{.*}} |
| ; AVX2-FAST-PERLANE: {{.*}} |
| ; AVX2-SLOW: {{.*}} |
| ; AVX512BW: {{.*}} |
| ; AVX512BW-ONLY-FAST: {{.*}} |
| ; AVX512BW-ONLY-SLOW: {{.*}} |
| ; AVX512DQ-FAST: {{.*}} |
| ; AVX512DQ-SLOW: {{.*}} |
| ; AVX512DQBW-FAST: {{.*}} |
| ; AVX512DQBW-SLOW: {{.*}} |
| ; AVX512F: {{.*}} |
| ; AVX512F-ONLY-FAST: {{.*}} |
| ; AVX512F-ONLY-SLOW: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |