| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12 |
| |
; These patterns are produced by the LoopVectorizer for interleaved loads.
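;
; As a rough illustration (not part of the generated checks), each
; @load_i64_stride5_vfN below is the vectorized form of a scalar
; deinterleaving loop along the lines of:
;
;   for (int i = 0; i != VF; ++i) {
;     out0[i] = in[5 * i + 0];
;     out1[i] = in[5 * i + 1];
;     out2[i] = in[5 * i + 2];
;     out3[i] = in[5 * i + 3];
;     out4[i] = in[5 * i + 4];
;   }
;
; i.e. one wide <5*VF x i64> load followed by five strided shufflevectors.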
| |
| define void @load_i64_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movapd 64(%rdi), %xmm0 |
| ; SSE-NEXT: movapd (%rdi), %xmm1 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm4 |
| ; SSE-NEXT: movapd %xmm3, %xmm5 |
| ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm4[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm0[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm5, (%rsi) |
| ; SSE-NEXT: movapd %xmm1, (%rdx) |
| ; SSE-NEXT: movapd %xmm4, (%rcx) |
| ; SSE-NEXT: movapd %xmm2, (%r8) |
| ; SSE-NEXT: movapd %xmm0, (%r9) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf2: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm4, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%r8) |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, (%r9) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf2: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm1[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm0, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm2, (%r9) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i64_stride5_vf2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX512-NEXT: vmovdqa 48(%rdi), %xmm2 |
| ; AVX512-NEXT: vmovdqa 64(%rdi), %xmm3 |
| ; AVX512-NEXT: vpblendd {{.*#+}} xmm4 = xmm0[0,1],xmm1[2,3] |
| ; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] |
| ; AVX512-NEXT: vmovaps (%rdi), %ymm2 |
| ; AVX512-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7] |
| ; AVX512-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3] |
| ; AVX512-NEXT: vmovdqa %xmm4, (%rsi) |
| ; AVX512-NEXT: vmovdqa %xmm0, (%rdx) |
| ; AVX512-NEXT: vextractf128 $1, %ymm2, (%rcx) |
| ; AVX512-NEXT: vmovdqa %xmm5, (%r8) |
| ; AVX512-NEXT: vmovdqa %xmm1, (%r9) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %wide.vec = load <10 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 0, i32 5> |
| %strided.vec1 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 1, i32 6> |
| %strided.vec2 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 2, i32 7> |
| %strided.vec3 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 3, i32 8> |
| %strided.vec4 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 4, i32 9> |
| store <2 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <2 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <2 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <2 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <2 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| |
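; VF=4: each of the five results is a full 256-bit vector. Lane N of result K
; comes from indices {K, K+5, K+10, K+15} of the 20-element input, so the
; AVX512 variants compute most results with a single cross-register vpermi2q
; over the first two zmm inputs, plus a blend for the lane that lives in the
; trailing 32 bytes at 128(%rdi).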
| define void @load_i64_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movapd 144(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 64(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 96(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 128(%rdi), %xmm3 |
| ; SSE-NEXT: movapd (%rdi), %xmm4 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm5 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm7 |
| ; SSE-NEXT: movapd 80(%rdi), %xmm8 |
| ; SSE-NEXT: movapd 112(%rdi), %xmm9 |
| ; SSE-NEXT: movapd %xmm9, %xmm10 |
| ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm8[0],xmm10[1] |
| ; SSE-NEXT: movapd %xmm6, %xmm11 |
| ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm4[0],xmm11[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm3[0] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm7[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1] |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1] |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm10, 16(%rsi) |
| ; SSE-NEXT: movapd %xmm11, (%rsi) |
| ; SSE-NEXT: movapd %xmm8, 16(%rdx) |
| ; SSE-NEXT: movapd %xmm4, (%rdx) |
| ; SSE-NEXT: movapd %xmm3, 16(%rcx) |
| ; SSE-NEXT: movapd %xmm7, (%rcx) |
| ; SSE-NEXT: movapd %xmm2, 16(%r8) |
| ; SSE-NEXT: movapd %xmm5, (%r8) |
| ; SSE-NEXT: movapd %xmm1, 16(%r9) |
| ; SSE-NEXT: movapd %xmm0, (%r9) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf4: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3] |
| ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm8 = xmm4[0],xmm6[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[3],ymm9[2] |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm4[0,1],xmm8[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm6[0],xmm8[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm2, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm4, (%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, (%r9) |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf4: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm5[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm7 = ymm2[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1],xmm8[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm3, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm5, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%r9) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-SLOW-LABEL: load_i64_stride5_vf4: |
| ; AVX512F-SLOW: # %bb.0: |
| ; AVX512F-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512F-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15] |
| ; AVX512F-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u> |
| ; AVX512F-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 |
| ; AVX512F-SLOW-NEXT: vmovdqa 128(%rdi), %xmm4 |
| ; AVX512F-SLOW-NEXT: vpbroadcastq %xmm4, %ymm5 |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <2,7,12,u> |
| ; AVX512F-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm5 |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <11,0,5,u> |
| ; AVX512F-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm5 |
| ; AVX512F-SLOW-NEXT: vpbroadcastq 144(%rdi), %ymm6 |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <12,1,6,u> |
| ; AVX512F-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],mem[6,7] |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm2, (%rsi) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm3, (%rdx) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm4, (%rcx) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm5, (%r8) |
| ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%r9) |
| ; AVX512F-SLOW-NEXT: vzeroupper |
| ; AVX512F-SLOW-NEXT: retq |
| ; |
| ; AVX512F-FAST-LABEL: load_i64_stride5_vf4: |
| ; AVX512F-FAST: # %bb.0: |
| ; AVX512F-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512F-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15] |
| ; AVX512F-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u> |
| ; AVX512F-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,4] |
| ; AVX512F-FAST-NEXT: vmovdqa 128(%rdi), %ymm5 |
| ; AVX512F-FAST-NEXT: vpermi2q %ymm5, %ymm3, %ymm4 |
| ; AVX512F-FAST-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm3 |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <2,7,12,u> |
| ; AVX512F-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm6 |
| ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <11,0,5,u> |
| ; AVX512F-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512F-FAST-NEXT: vpbroadcastq 144(%rdi), %ymm7 |
| ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7] |
| ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <12,1,6,u> |
| ; AVX512F-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm7 |
| ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm2, (%rsi) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm4, (%rdx) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm3, (%rcx) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm6, (%r8) |
| ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%r9) |
| ; AVX512F-FAST-NEXT: vzeroupper |
| ; AVX512F-FAST-NEXT: retq |
| ; |
| ; AVX512BW-SLOW-LABEL: load_i64_stride5_vf4: |
| ; AVX512BW-SLOW: # %bb.0: |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15] |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u> |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 |
| ; AVX512BW-SLOW-NEXT: vmovdqa 128(%rdi), %xmm4 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastq %xmm4, %ymm5 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <2,7,12,u> |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm5 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <11,0,5,u> |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm5 |
| ; AVX512BW-SLOW-NEXT: vpbroadcastq 144(%rdi), %ymm6 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <12,1,6,u> |
| ; AVX512BW-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],mem[6,7] |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm2, (%rsi) |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm3, (%rdx) |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm4, (%rcx) |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm5, (%r8) |
| ; AVX512BW-SLOW-NEXT: vmovdqa %ymm0, (%r9) |
| ; AVX512BW-SLOW-NEXT: vzeroupper |
| ; AVX512BW-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-FAST-LABEL: load_i64_stride5_vf4: |
| ; AVX512BW-FAST: # %bb.0: |
| ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15] |
| ; AVX512BW-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u> |
| ; AVX512BW-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,4] |
| ; AVX512BW-FAST-NEXT: vmovdqa 128(%rdi), %ymm5 |
| ; AVX512BW-FAST-NEXT: vpermi2q %ymm5, %ymm3, %ymm4 |
| ; AVX512BW-FAST-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm3 |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <2,7,12,u> |
| ; AVX512BW-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm6 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <11,0,5,u> |
| ; AVX512BW-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512BW-FAST-NEXT: vpbroadcastq 144(%rdi), %ymm7 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7] |
| ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <12,1,6,u> |
| ; AVX512BW-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm7 |
| ; AVX512BW-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX512BW-FAST-NEXT: vmovdqa %ymm2, (%rsi) |
| ; AVX512BW-FAST-NEXT: vmovdqa %ymm4, (%rdx) |
| ; AVX512BW-FAST-NEXT: vmovdqa %ymm3, (%rcx) |
| ; AVX512BW-FAST-NEXT: vmovdqa %ymm6, (%r8) |
| ; AVX512BW-FAST-NEXT: vmovdqa %ymm0, (%r9) |
| ; AVX512BW-FAST-NEXT: vzeroupper |
| ; AVX512BW-FAST-NEXT: retq |
| %wide.vec = load <20 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15> |
| %strided.vec1 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 1, i32 6, i32 11, i32 16> |
| %strided.vec2 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 2, i32 7, i32 12, i32 17> |
| %strided.vec3 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 3, i32 8, i32 13, i32 18> |
| %strided.vec4 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 4, i32 9, i32 14, i32 19> |
| store <4 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <4 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <4 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <4 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <4 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| |
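; VF=8: the <40 x i64> input spans five zmm registers. The AVX512 lowerings
; merge pairs of vpermi2q permutes under a write mask (movb $7 / movb $56
; loaded into %k1) and finish each result with one more vpermi2q that pulls
; the remaining elements from the fifth input register at 256(%rdi).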
| define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf8: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movapd 224(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 304(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 64(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 176(%rdi), %xmm4 |
| ; SSE-NEXT: movapd 256(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 208(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 288(%rdi), %xmm7 |
| ; SSE-NEXT: movapd (%rdi), %xmm8 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm5 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm14 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm9 |
| ; SSE-NEXT: movapd 160(%rdi), %xmm10 |
| ; SSE-NEXT: movapd 192(%rdi), %xmm12 |
| ; SSE-NEXT: movapd 240(%rdi), %xmm11 |
| ; SSE-NEXT: movapd 272(%rdi), %xmm15 |
| ; SSE-NEXT: movapd %xmm14, %xmm13 |
| ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm9[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm5[0],xmm9[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm14[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm15, %xmm14 |
| ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm11[0],xmm14[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm3[0],xmm7[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm12, %xmm15 |
| ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm10[0],xmm15[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm6[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1] |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm2[0] |
| ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm12[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 80(%rdi), %xmm12 |
| ; SSE-NEXT: movapd 112(%rdi), %xmm4 |
| ; SSE-NEXT: movapd %xmm4, %xmm3 |
| ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm12[0],xmm3[1] |
| ; SSE-NEXT: movapd 128(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm0[0] |
| ; SSE-NEXT: movapd 96(%rdi), %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] |
| ; SSE-NEXT: movapd 144(%rdi), %xmm2 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm2[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm3, 16(%rsi) |
| ; SSE-NEXT: movapd %xmm15, 32(%rsi) |
| ; SSE-NEXT: movapd %xmm14, 48(%rsi) |
| ; SSE-NEXT: movapd %xmm13, (%rsi) |
| ; SSE-NEXT: movapd %xmm12, 16(%rdx) |
| ; SSE-NEXT: movapd %xmm10, 32(%rdx) |
| ; SSE-NEXT: movapd %xmm11, 48(%rdx) |
| ; SSE-NEXT: movapd %xmm8, (%rdx) |
| ; SSE-NEXT: movapd %xmm0, 16(%rcx) |
| ; SSE-NEXT: movapd %xmm6, 32(%rcx) |
| ; SSE-NEXT: movapd %xmm7, 48(%rcx) |
| ; SSE-NEXT: movapd %xmm9, (%rcx) |
| ; SSE-NEXT: movapd %xmm1, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movapd %xmm5, (%r8) |
| ; SSE-NEXT: movapd %xmm2, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf8: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm7[0,1,2],ymm2[3] |
| ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm5 = xmm10[0],xmm4[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm9[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm13 = xmm12[0],xmm5[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm13[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm14 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0],ymm14[0],ymm7[3],ymm14[2] |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3] |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[0],ymm10[0],ymm9[3],ymm10[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm12[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm12[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm8[0,1,2,3],xmm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm13 = xmm11[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13 |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm15 = xmm14[0,1,2,3],xmm15[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm12[0],ymm1[0],ymm12[3],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3] |
| ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[3],ymm12[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm14 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm14[0,1],ymm11[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm2 = xmm4[0],xmm15[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm12[3] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm0 = xmm5[0],xmm0[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovapd %ymm6, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm9, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm7, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm13, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm11, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm8, (%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%r9) |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf8: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm14 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm13[0,1],xmm5[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm12[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm15 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm10 = xmm15[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm15[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm12 = ymm12[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm11[2,3],ymm13[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[2,3] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm14 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm14 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = mem[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm6[0,1],xmm14[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm12, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm11, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm13, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%r9) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride5_vf8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm4 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [12,1,6,0,12,1,6,0] |
| ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm3, %zmm4, %zmm5 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15] |
| ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm6 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,11] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm5, %zmm6 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <1,6,11,u> |
| ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm5 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,10,15,0,5,10,15,0] |
| ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm7 |
| ; AVX512F-NEXT: movb $7, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm7 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,12] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm7, %zmm5 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [6,11,0,1,6,11,0,1] |
| ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm7 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u> |
| ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm8 |
| ; AVX512F-NEXT: movb $56, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm8, %zmm7 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [7,12,0,2,7,12,0,2] |
| ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm8 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = <11,0,5,u> |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,9,14] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm9, %zmm8 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,0,11,0,5,0,11] |
| ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermi2q %zmm3, %zmm4, %zmm9 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = <12,1,6,u> |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,10,15] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride5_vf8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [12,1,6,0,12,1,6,0] |
| ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm4, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15] |
| ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm6 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,11] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm5, %zmm6 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <1,6,11,u> |
| ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm5 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,10,15,0,5,10,15,0] |
| ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm7 |
| ; AVX512BW-NEXT: movb $7, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm7 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,12] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm7, %zmm5 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [6,11,0,1,6,11,0,1] |
| ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u> |
| ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm8 |
| ; AVX512BW-NEXT: movb $56, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm8, %zmm7 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [7,12,0,2,7,12,0,2] |
| ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = <11,0,5,u> |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm2, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,9,14] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm9, %zmm8 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,0,11,0,5,0,11] |
| ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm4, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = <12,1,6,u> |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,10,15] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <40 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35> |
| %strided.vec1 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36> |
| %strided.vec2 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37> |
| %strided.vec3 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38> |
| %strided.vec4 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39> |
| store <8 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <8 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <8 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <8 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <8 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| |
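; VF=16: the working set no longer fits in the register file, so the scalar
; and AVX1 lowerings spill intermediate results to the stack (subq $280, %rsp
; for SSE, subq $360, %rsp for AVX1-ONLY) and reload them while writing out
; the five 128-byte results.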
| define void @load_i64_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf16: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $280, %rsp # imm = 0x118 |
| ; SSE-NEXT: movapd 224(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 144(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 64(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 96(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 176(%rdi), %xmm4 |
| ; SSE-NEXT: movapd 128(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 208(%rdi), %xmm7 |
| ; SSE-NEXT: movapd (%rdi), %xmm8 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm5 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm13 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm9 |
| ; SSE-NEXT: movapd 80(%rdi), %xmm10 |
| ; SSE-NEXT: movapd 112(%rdi), %xmm14 |
| ; SSE-NEXT: movapd 160(%rdi), %xmm11 |
| ; SSE-NEXT: movapd 192(%rdi), %xmm15 |
| ; SSE-NEXT: movapd %xmm13, %xmm12 |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm8[0],xmm12[1] |
| ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm9[0] |
| ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm5[0],xmm9[1] |
| ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm2[0] |
| ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm13[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm14, %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm10[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm6[0] |
| ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1] |
| ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm15, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm11[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0] |
| ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1] |
| ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 240(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 272(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 288(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 256(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 304(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 320(%rdi), %xmm15 |
| ; SSE-NEXT: movapd 352(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 368(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0] |
| ; SSE-NEXT: movapd 336(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 384(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movapd 400(%rdi), %xmm8 |
| ; SSE-NEXT: movapd 432(%rdi), %xmm1 |
| ; SSE-NEXT: movapd %xmm1, %xmm13 |
| ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1] |
| ; SSE-NEXT: movapd 448(%rdi), %xmm12 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm12[0] |
| ; SSE-NEXT: movapd 416(%rdi), %xmm14 |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm14[0],xmm12[1] |
| ; SSE-NEXT: movapd 464(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm0[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 480(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 512(%rdi), %xmm6 |
| ; SSE-NEXT: movapd %xmm6, %xmm9 |
| ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm3[0],xmm9[1] |
| ; SSE-NEXT: movapd 528(%rdi), %xmm5 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm5[0] |
| ; SSE-NEXT: movapd 496(%rdi), %xmm4 |
| ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1] |
| ; SSE-NEXT: movapd 544(%rdi), %xmm10 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm10[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm6[0],xmm10[1] |
| ; SSE-NEXT: movapd 560(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 592(%rdi), %xmm11 |
| ; SSE-NEXT: movapd %xmm11, %xmm7 |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm6[0],xmm7[1] |
| ; SSE-NEXT: movapd 608(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm0[0] |
| ; SSE-NEXT: movapd 576(%rdi), %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] |
| ; SSE-NEXT: movapd 624(%rdi), %xmm2 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm2[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm11[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm13, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm11, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm11, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm11, (%rsi) |
| ; SSE-NEXT: movapd %xmm7, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm7, 48(%rsi) |
| ; SSE-NEXT: movapd %xmm9, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm7, 32(%rsi) |
| ; SSE-NEXT: movapd %xmm8, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm7, 16(%rdx) |
| ; SSE-NEXT: movapd %xmm15, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm7, (%rdx) |
| ; SSE-NEXT: movapd %xmm6, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 48(%rdx) |
| ; SSE-NEXT: movapd %xmm3, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 32(%rcx) |
| ; SSE-NEXT: movapd %xmm12, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm3, 64(%rcx) |
| ; SSE-NEXT: movapd %xmm0, 112(%rcx) |
| ; SSE-NEXT: movapd %xmm5, 96(%rcx) |
| ; SSE-NEXT: movapd %xmm1, 112(%r8) |
| ; SSE-NEXT: movapd %xmm4, 96(%r8) |
| ; SSE-NEXT: movapd %xmm14, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movapd %xmm2, 112(%r9) |
| ; SSE-NEXT: movapd %xmm10, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r9) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: addq $280, %rsp # imm = 0x118 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf16: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $360, %rsp # imm = 0x168 |
| ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovapd 576(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3] |
| ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm6 = xmm4[0],xmm6[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm1[0,1,2],ymm7[3] |
| ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 480(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm7 = xmm9[0],xmm6[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm7[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm5[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm11 = xmm13[0],xmm0[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm11[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 384(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = ymm3[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm14 = xmm0[0],xmm6[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm14[0,1],ymm15[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm15 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm15[0],ymm2[3],ymm15[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[3],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm9[0],ymm5[3],ymm9[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm13[0,1],ymm5[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm1, (%rsp) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm5 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm14 = xmm11[0,1,2,3],xmm15[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 496(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm7[0,1],xmm6[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm9[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm13 = xmm2[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13 |
| ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm12[0,1,2,3],xmm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[3],ymm8[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = xmm11[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm4[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm11[0],ymm3[3],ymm11[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm7[0],ymm3[3],ymm7[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm10 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm10[0],ymm2[3],ymm10[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm12[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1,2],ymm8[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm6 = mem[0,1,2,3],xmm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1,2],ymm11[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm4 = mem[0,1,2,3],xmm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm8[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm8 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1,2],ymm10[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm7 = mem[0,1,2],ymm7[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm7[2,3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm13, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm14, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm15, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm2, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm3, (%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm5, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm9, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm4, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm6, 32(%r9) |
| ; AVX1-ONLY-NEXT: addq $360, %rsp # imm = 0x168 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
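| ; Note that the AVX2 lowering below performs the same stride-5 gather in the integer domain (vpblendd/vpalignr/vpermq on ymm registers), whereas the AVX1 lowering above stays in the floating-point domain (vblendps/vshufpd with vinsertf128 halves). |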
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf16: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $360, %rsp # imm = 0x168 |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm12[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm7 = xmm5[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm13[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm7[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm14[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm10 = xmm9[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm10[0,1,2,3,4,5],ymm11[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm15 = xmm0[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm7[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm9[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm4[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm5 = ymm10[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5 |
| ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm12[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm15 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm13[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm5 = ymm14[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],ymm14[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm10 = ymm11[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5],ymm8[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm10 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm10 = mem[0,1],xmm15[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm13[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm5, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm6, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, 32(%r9) |
| ; AVX2-ONLY-NEXT: addq $360, %rsp # imm = 0x168 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
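| ; The AVX-512 lowerings compute each result directly with two-source permutes (vpermt2q/vpermi2q) driven by constant index vectors, then combine halves with masked moves: movb $7 puts qword lanes 0-2 in k1 and movb $56 puts lanes 3-5. |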
| ; AVX512F-LABEL: load_i64_stride5_vf16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm9 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [12,1,6,0,12,1,6,0] |
| ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm7 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm10, %zmm7 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm12 = [0,5,10,15] |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm12, %zmm13 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm13[0,1,2,3],zmm7[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,11] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm13, %zmm7 |
| ; AVX512F-NEXT: vpermi2q %zmm5, %zmm4, %zmm10 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm12 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7] |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm13, %zmm10 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm14 = <1,6,11,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm14, %zmm15 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [5,10,15,0,5,10,15,0] |
| ; AVX512F-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm13, %zmm12 |
| ; AVX512F-NEXT: movb $7, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm12 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,12] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm12 |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm13 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm14 |
| ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm13 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm13 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [6,11,0,1,6,11,0,1] |
| ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm14, %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = <2,7,12,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm17 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm16, %zmm17 |
| ; AVX512F-NEXT: movb $56, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,8,13] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm17 |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm14 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm16 |
| ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm16 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm16 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [7,12,0,2,7,12,0,2] |
| ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm14, %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm18 = <11,0,5,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm19 |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm18, %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm19 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,9,14] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm19 |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm14 |
| ; AVX512F-NEXT: vpermi2q %zmm3, %zmm0, %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm18 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm18 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,5,0,11,0,5,0,11] |
| ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm14, %zmm9 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = <12,1,6,u> |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm11, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,15] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm8, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm14, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm3, %zmm11, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm8, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, 64(%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, 64(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, 64(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm17, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm18, 64(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, 64(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
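| ; The AVX512BW body matches the AVX512F body above line for line, apart from loading the mask register with kmovd instead of kmovw. |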
| ; AVX512BW-LABEL: load_i64_stride5_vf16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [12,1,6,0,12,1,6,0] |
| ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm7 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm10, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [0,5,10,15] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm12, %zmm13 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm13[0,1,2,3],zmm7[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,11] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm13, %zmm7 |
| ; AVX512BW-NEXT: vpermi2q %zmm5, %zmm4, %zmm10 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm12 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7] |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm13, %zmm10 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = <1,6,11,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm14, %zmm15 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [5,10,15,0,5,10,15,0] |
| ; AVX512BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm13, %zmm12 |
| ; AVX512BW-NEXT: movb $7, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm12 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,12] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm12 |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm13 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm14 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm13 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm13 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [6,11,0,1,6,11,0,1] |
| ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm14, %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm16 = <2,7,12,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm17 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm16, %zmm17 |
| ; AVX512BW-NEXT: movb $56, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,8,13] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm17 |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm14 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm16 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm16 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm16 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [7,12,0,2,7,12,0,2] |
| ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm14, %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = <11,0,5,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm19 |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm18, %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm19 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,9,14] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm19 |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm14 |
| ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm0, %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm18 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm18 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,5,0,11,0,5,0,11] |
| ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm14, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = <12,1,6,u> |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,15] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm8, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm14, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm11, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm8, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, 64(%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, 64(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, 64(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm17, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm18, 64(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
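| ; Reference IR: a single <80 x i64> wide load is deinterleaved into five <16 x i64> results, each shufflevector gathering every fifth element starting at offsets 0 through 4. |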
| %wide.vec = load <80 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75> |
| %strided.vec1 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76> |
| %strided.vec2 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77> |
| %strided.vec3 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78> |
| %strided.vec4 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79> |
| store <16 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <16 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <16 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <16 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <16 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| |
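| ; vf32 variant: following the vf16 pattern above, five <32 x i64> results are gathered at stride-5 offsets 0-4 from one <160 x i64> wide load. |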
| define void @load_i64_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf32: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $920, %rsp # imm = 0x398 |
| ; SSE-NEXT: movapd 224(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 144(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 64(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 176(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 96(%rdi), %xmm4 |
| ; SSE-NEXT: movapd 208(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 128(%rdi), %xmm7 |
| ; SSE-NEXT: movapd (%rdi), %xmm9 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm5 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm13 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm8 |
| ; SSE-NEXT: movapd 160(%rdi), %xmm10 |
| ; SSE-NEXT: movapd 192(%rdi), %xmm14 |
| ; SSE-NEXT: movapd 80(%rdi), %xmm11 |
| ; SSE-NEXT: movapd 112(%rdi), %xmm15 |
| ; SSE-NEXT: movapd %xmm13, %xmm12 |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm9[0],xmm12[1] |
| ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm8[0] |
| ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1] |
| ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm2[0] |
| ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm13[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm15, %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm11[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0] |
| ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1] |
| ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm14, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm10[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm6[0] |
| ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1] |
| ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm14[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 240(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 272(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 288(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 256(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 304(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 320(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 352(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 368(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 336(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 384(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 400(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 432(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 448(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 416(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 464(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 480(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 512(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 528(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 496(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 544(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 560(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 592(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 608(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 576(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 624(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 640(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 672(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 688(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 656(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 704(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 720(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 752(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 768(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 736(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 784(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 800(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 832(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 848(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 816(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 864(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 880(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 912(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 928(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 896(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 944(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 960(%rdi), %xmm10 |
| ; SSE-NEXT: movapd 992(%rdi), %xmm1 |
| ; SSE-NEXT: movapd %xmm1, %xmm14 |
| ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm10[0],xmm14[1] |
| ; SSE-NEXT: movapd 1008(%rdi), %xmm15 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm15[0] |
| ; SSE-NEXT: movapd 976(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm2[0],xmm15[1] |
| ; SSE-NEXT: movapd 1024(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1040(%rdi), %xmm8 |
| ; SSE-NEXT: movapd 1072(%rdi), %xmm3 |
| ; SSE-NEXT: movapd %xmm3, %xmm13 |
| ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1] |
| ; SSE-NEXT: movapd 1088(%rdi), %xmm9 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm9[0] |
| ; SSE-NEXT: movapd 1056(%rdi), %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm1[0],xmm9[1] |
| ; SSE-NEXT: movapd 1104(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movapd 1120(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 1152(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm5 |
| ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1] |
| ; SSE-NEXT: movapd 1168(%rdi), %xmm6 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm6[0] |
| ; SSE-NEXT: movapd 1136(%rdi), %xmm11 |
| ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm11[0],xmm6[1] |
| ; SSE-NEXT: movapd 1184(%rdi), %xmm2 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm2[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1200(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 1232(%rdi), %xmm4 |
| ; SSE-NEXT: movapd %xmm4, %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] |
| ; SSE-NEXT: movapd 1248(%rdi), %xmm3 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm3[0] |
| ; SSE-NEXT: movapd 1216(%rdi), %xmm7 |
| ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm7[0],xmm3[1] |
| ; SSE-NEXT: movapd 1264(%rdi), %xmm12 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm12[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm4[0],xmm12[1] |
| ; SSE-NEXT: movapd %xmm5, 224(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm4, 160(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm4, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm4, 32(%rsi) |
| ; SSE-NEXT: movapd %xmm2, 240(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 176(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 48(%rsi) |
| ; SSE-NEXT: movapd %xmm14, 192(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 128(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, (%rsi) |
| ; SSE-NEXT: movapd %xmm13, 208(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 144(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm2, 16(%rsi) |
| ; SSE-NEXT: movapd %xmm1, 224(%rdx) |
| ; SSE-NEXT: movapd %xmm0, 240(%rdx) |
| ; SSE-NEXT: movapd %xmm10, 192(%rdx) |
| ; SSE-NEXT: movapd %xmm8, 208(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movapd %xmm3, 240(%rcx) |
| ; SSE-NEXT: movapd %xmm6, 224(%rcx) |
| ; SSE-NEXT: movapd %xmm9, 208(%rcx) |
| ; SSE-NEXT: movapd %xmm15, 192(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movapd %xmm7, 240(%r8) |
| ; SSE-NEXT: movapd %xmm11, 224(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movapd %xmm12, 240(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%r9) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: addq $920, %rsp # imm = 0x398 |
| ; SSE-NEXT: retq |
| ; |
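| ; For vf32 the AVX1 lowering again assembles each 256-bit result from 128-bit halves (vinsertf128 plus vshufpd/vblendpd), spilling intermediates to a 1336-byte stack frame. |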
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf32: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $1336, %rsp # imm = 0x538 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 576(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm12[0],xmm1[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm3[3] |
| ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 480(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm15[0],xmm1[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1216(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1184(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 1152(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1120(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm2 = xmm7[0],xmm2[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 384(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm8[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm3 = xmm5[0],xmm0[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 736(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm3[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 672(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 640(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm10 = xmm2[0],xmm0[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm13 = xmm9[0,1],xmm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm13 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm11[0],ymm13[0],ymm11[3],ymm13[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm13 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm14[0],ymm13[0],ymm14[3],ymm13[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm15 = xmm15[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm13 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm0[0],ymm13[0],ymm0[3],ymm13[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 848(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm13[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm1[0],ymm6[3],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm0[0],ymm6[0],ymm0[3],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm8[0],ymm4[0],ymm8[3],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 688(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm9[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm10[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm13[0,1,2,3],xmm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm6[0,1],xmm12[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovdqa 496(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm2[0,1,2,3],xmm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm3[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = xmm5[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm5[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovdqa 1136(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm11[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovdqa 976(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm0[0,1,2,3],xmm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm8[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm9[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm10[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm14[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7 |
| ; AVX1-ONLY-NEXT: vmovaps 656(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm12[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 |
| ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm15 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm8 = ymm4[0],ymm15[0],ymm4[3],ymm15[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm13[0,1],ymm8[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm6[0],ymm4[0],ymm6[3],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovapd 928(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 864(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovapd 1248(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm11[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[3],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovapd 768(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm0[0],ymm4[0],ymm0[3],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 704(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = ymm12[0,1],ymm11[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[0],ymm2[0],ymm9[3],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm10[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[0],ymm12[0],ymm9[3],ymm12[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm9 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm9 = mem[0,1,2],ymm12[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm15[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[0,1,2,3],xmm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm7[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm2[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm4[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm2 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm6[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm2 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm2 = mem[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm4 = mem[0,1],xmm4[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm10, (%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm11, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm14, 128(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 224(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, 192(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 160(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm1, 128(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm7, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm9, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm12, (%r9) |
| ; AVX1-ONLY-NEXT: addq $1336, %rsp # imm = 0x538 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf32: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $1544, %rsp # imm = 0x608 |
| ; AVX2-ONLY-NEXT: vmovdqa 896(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 832(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 800(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1216(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1120(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 736(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 672(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 640(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm5[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1056(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 992(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 960(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm15 = xmm2[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 848(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm10[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 928(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm10 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1248(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm6 = ymm14[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm14[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm9 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm13[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 688(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 768(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm3, %ymm7 |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1088(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm13[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm2 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm2 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm2[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm10[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm9[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %xmm15 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm6[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm2[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm11[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm3 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm3 = mem[0,1],xmm12[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm1 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[0,1,2,3,4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3,4,5],ymm7[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, (%rsp), %xmm15, %xmm4 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm4 = mem[0,1],xmm15[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm5 = mem[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm5 = ymm10[0,1,2,3,4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm6 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm6 = mem[0,1],xmm14[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm2 = mem[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 224(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 192(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 128(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm11, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%r9) |
| ; AVX2-ONLY-NEXT: addq $1544, %rsp # imm = 0x608 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride5_vf32: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: subq $648, %rsp # imm = 0x288 |
| ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm22 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm3 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [12,1,6,0,12,1,6,0] |
| ; AVX512F-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm22, %zmm4, %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm28 = [0,5,10,15] |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm14 |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm28, %zmm14 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm20, %zmm4, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm4, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm13, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm30 = <1,6,11,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm30, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [5,10,15,0,5,10,15,0] |
| ; AVX512F-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm27 |
| ; AVX512F-NEXT: vpermt2q %zmm3, %zmm17, %zmm27 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm26 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm17, %zmm26 |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm21 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm17, %zmm21 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm13, %zmm17 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [6,11,0,1,6,11,0,1] |
| ; AVX512F-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm3, %zmm25, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm23 = <2,7,12,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm24 |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm23, %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm25, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm13, %zmm25 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [7,12,0,2,7,12,0,2] |
| ; AVX512F-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm3, %zmm29, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [0,5,0,11,0,5,0,11] |
| ; AVX512F-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm22, %zmm31, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm22 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm29, %zmm22 |
| ; AVX512F-NEXT: vpermt2q %zmm20, %zmm31, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, (%rsp) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm20 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm29, %zmm20 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm13, %zmm29 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm31, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm13 = <11,0,5,u> |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm31, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm9 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm13, %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm31 = <12,1,6,u> |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm31, %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm10 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm28, %zmm10 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm30, %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm6 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm23, %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm13, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm31, %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm28, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm0 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm28 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm7 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm30, %zmm7 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm30 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm23, %zmm3 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm23 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm13, %zmm4 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm31, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm31, %zmm15 |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm16 = zmm14[0,1,2,3],zmm12[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm14 = zmm10[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm12 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm12 = zmm5[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm10 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm10 = zmm28[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm28 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,5,6,11] |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm31, %zmm16 |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm31, %zmm14 |
| ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm31, %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm31, %zmm10 |
| ; AVX512F-NEXT: movb $7, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm27 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm26 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm21 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm17 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,6,12] |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm7, %zmm27 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm7, %zmm26 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm7, %zmm21 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm7, %zmm17 |
| ; AVX512F-NEXT: movb $56, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm24 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm6 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm23 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13] |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm7, %zmm24 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm7, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm7, %zmm6 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm7, %zmm23 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm9 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm13 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,9,14] |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm7, %zmm9 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm7, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm7, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm7, %zmm13 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm18 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,10,15] |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm7, %zmm18 |
| ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm11 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm19 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm7, %zmm19 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm7, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm15 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm7, %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, 192(%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, 128(%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm14, 64(%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm17, 192(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm21, 64(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm26, 128(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm23, 192(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, 64(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, 128(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, 192(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, 64(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, 128(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, 128(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, 192(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm18, (%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, 64(%r9) |
| ; AVX512F-NEXT: addq $648, %rsp # imm = 0x288 |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride5_vf32: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: subq $648, %rsp # imm = 0x288 |
| ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm22 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [12,1,6,0,12,1,6,0] |
| ; AVX512BW-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm4, %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = [0,5,10,15] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm14 |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm28, %zmm14 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm4, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm4, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm13, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm30 = <1,6,11,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm30, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm17 = [5,10,15,0,5,10,15,0] |
| ; AVX512BW-NEXT: # zmm17 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm27 |
| ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm17, %zmm27 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm26 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm17, %zmm26 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm21 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm17, %zmm21 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm13, %zmm17 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm25 = [6,11,0,1,6,11,0,1] |
| ; AVX512BW-NEXT: # zmm25 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm25, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm23 = <2,7,12,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm24 |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm23, %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm25, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm13, %zmm25 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [7,12,0,2,7,12,0,2] |
| ; AVX512BW-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm29, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm31 = [0,5,0,11,0,5,0,11] |
| ; AVX512BW-NEXT: # zmm31 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm31, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm22 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm29, %zmm22 |
| ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm31, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, (%rsp) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm20 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm29, %zmm20 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm13, %zmm29 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm31, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm13 = <11,0,5,u> |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm31, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm9 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm13, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm31 = <12,1,6,u> |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm31, %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm10 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm28, %zmm10 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm30, %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm6 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm23, %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm13, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm31, %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm28, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm28 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm7 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm30, %zmm7 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm30 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm23, %zmm3 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm23 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm13, %zmm4 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm15 |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm16 = zmm14[0,1,2,3],zmm12[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm14 = zmm10[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm12 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm12 = zmm5[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm10 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm10 = zmm28[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm28 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,5,6,11] |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm31, %zmm16 |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm14 |
| ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm10 |
| ; AVX512BW-NEXT: movb $7, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm27 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm26 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm21 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm17 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,6,12] |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm7, %zmm27 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm7, %zmm26 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm7, %zmm21 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm7, %zmm17 |
| ; AVX512BW-NEXT: movb $56, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm24 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm6 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm23 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13] |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm7, %zmm24 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm7, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm7, %zmm6 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm7, %zmm23 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm9 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm13 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,9,14] |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm7, %zmm9 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm7, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm7, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm7, %zmm13 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm18 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,10,15] |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm7, %zmm18 |
| ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm11 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm19 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm7, %zmm19 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm7, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm15 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm7, %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, 192(%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, 128(%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm14, 64(%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm17, 192(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm21, 64(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm26, 128(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm23, 192(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, 64(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, 192(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, 64(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, 128(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, 192(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm18, (%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, 64(%r9) |
| ; AVX512BW-NEXT: addq $648, %rsp # imm = 0x288 |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
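| ; Stride-5 deinterleave: element j of %strided.vecK below is input element 5*j+K, so each of the five shuffle masks steps by 5 from a start offset of 0..4. |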
| %wide.vec = load <160 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155> |
| %strided.vec1 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156> |
| %strided.vec2 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157> |
| %strided.vec3 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158> |
| %strided.vec4 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159> |
| store <32 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <32 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <32 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <32 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <32 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| |
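| ; vf64 splits a 320-element load into five <64 x i64> results. The SSE lowering builds each 128-bit piece with movsd/shufpd lane merges and spills nearly all of them to a 2200-byte frame before the final block of stores. |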
| define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind { |
| ; SSE-LABEL: load_i64_stride5_vf64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $2200, %rsp # imm = 0x898 |
| ; SSE-NEXT: movapd 224(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 144(%rdi), %xmm1 |
| ; SSE-NEXT: movapd 64(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 176(%rdi), %xmm3 |
| ; SSE-NEXT: movapd 96(%rdi), %xmm4 |
| ; SSE-NEXT: movapd 208(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 128(%rdi), %xmm7 |
| ; SSE-NEXT: movapd (%rdi), %xmm9 |
| ; SSE-NEXT: movapd 16(%rdi), %xmm5 |
| ; SSE-NEXT: movapd 32(%rdi), %xmm13 |
| ; SSE-NEXT: movapd 48(%rdi), %xmm8 |
| ; SSE-NEXT: movapd 160(%rdi), %xmm10 |
| ; SSE-NEXT: movapd 192(%rdi), %xmm14 |
| ; SSE-NEXT: movapd 80(%rdi), %xmm11 |
| ; SSE-NEXT: movapd 112(%rdi), %xmm15 |
| ; SSE-NEXT: movapd %xmm13, %xmm12 |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm9[0],xmm12[1] |
| ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm8[0] |
| ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1] |
| ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm2[0] |
| ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm13[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm15, %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm11[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0] |
| ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1] |
| ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd %xmm14, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm10[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm6[0] |
| ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1] |
| ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm14[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 240(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 272(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 288(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 256(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 304(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 320(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 352(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 368(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 336(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 384(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 400(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 432(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 448(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 416(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 464(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 480(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 512(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 528(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 496(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 544(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 560(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 592(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 608(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 576(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 624(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 640(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 672(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 688(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 656(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 704(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 720(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 752(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 768(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 736(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 784(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 800(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 832(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 848(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 816(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 864(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 880(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 912(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 928(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 896(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 944(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 960(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 992(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1008(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 976(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1024(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1040(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1072(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1088(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1056(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1104(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1120(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1152(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1168(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1136(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1184(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1200(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1232(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1248(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1216(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1264(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1280(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1312(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1328(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1296(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1344(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1360(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1392(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1408(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1376(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1424(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1440(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1472(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1488(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1456(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1504(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1520(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1552(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1568(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1536(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1584(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1600(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1632(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1648(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1616(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1664(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1680(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1712(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1728(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1696(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1744(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1760(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1792(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1808(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1776(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1824(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1840(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1872(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1888(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1856(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1904(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1920(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 1952(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1968(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1936(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 1984(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2000(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 2032(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2048(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2016(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2064(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2080(%rdi), %xmm2 |
| ; SSE-NEXT: movapd 2112(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2128(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2096(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2144(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2160(%rdi), %xmm14 |
| ; SSE-NEXT: movapd 2192(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm13 |
| ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm14[0],xmm13[1] |
| ; SSE-NEXT: movapd 2208(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm1[0] |
| ; SSE-NEXT: movapd 2176(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: movapd 2224(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2240(%rdi), %xmm11 |
| ; SSE-NEXT: movapd 2272(%rdi), %xmm0 |
| ; SSE-NEXT: movapd %xmm0, %xmm9 |
| ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm11[0],xmm9[1] |
| ; SSE-NEXT: movapd 2288(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm1[0] |
| ; SSE-NEXT: movapd 2256(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2304(%rdi), %xmm1 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2320(%rdi), %xmm7 |
| ; SSE-NEXT: movapd 2352(%rdi), %xmm1 |
| ; SSE-NEXT: movapd %xmm1, %xmm5 |
| ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm7[0],xmm5[1] |
| ; SSE-NEXT: movapd 2368(%rdi), %xmm15 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm15[0] |
| ; SSE-NEXT: movapd 2336(%rdi), %xmm2 |
| ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm2[0],xmm15[1] |
| ; SSE-NEXT: movapd 2384(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2400(%rdi), %xmm6 |
| ; SSE-NEXT: movapd 2432(%rdi), %xmm10 |
| ; SSE-NEXT: movapd %xmm10, %xmm3 |
| ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1] |
| ; SSE-NEXT: movapd 2448(%rdi), %xmm12 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm12[0] |
| ; SSE-NEXT: movapd 2416(%rdi), %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm1[0],xmm12[1] |
| ; SSE-NEXT: movapd 2464(%rdi), %xmm0 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0] |
| ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1] |
| ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movapd 2480(%rdi), %xmm0 |
| ; SSE-NEXT: movapd 2512(%rdi), %xmm4 |
| ; SSE-NEXT: movapd %xmm4, %xmm1 |
| ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] |
| ; SSE-NEXT: movapd 2528(%rdi), %xmm8 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm8[0] |
| ; SSE-NEXT: movapd 2496(%rdi), %xmm10 |
| ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm10[0],xmm8[1] |
| ; SSE-NEXT: movapd 2544(%rdi), %xmm2 |
| ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm2[0] |
| ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] |
| ; SSE-NEXT: movapd %xmm1, 496(%rsi) |
| ; SSE-NEXT: movapd %xmm3, 480(%rsi) |
| ; SSE-NEXT: movapd %xmm5, 464(%rsi) |
| ; SSE-NEXT: movapd %xmm9, 448(%rsi) |
| ; SSE-NEXT: movapd %xmm13, 432(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rsi) |
| ; SSE-NEXT: movapd %xmm0, 496(%rdx) |
| ; SSE-NEXT: movapd %xmm6, 480(%rdx) |
| ; SSE-NEXT: movapd %xmm7, 464(%rdx) |
| ; SSE-NEXT: movapd %xmm11, 448(%rdx) |
| ; SSE-NEXT: movapd %xmm14, 432(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 416(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 400(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 384(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 368(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 352(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 336(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 320(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 304(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 288(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 272(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 256(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movapd %xmm8, 496(%rcx) |
| ; SSE-NEXT: movapd %xmm12, 480(%rcx) |
| ; SSE-NEXT: movapd %xmm15, 464(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 448(%rcx) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 432(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 416(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 400(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 384(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 368(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 352(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 336(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 320(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 304(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 288(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 272(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 256(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movapd %xmm10, 496(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 480(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 464(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 448(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 432(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 416(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 400(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 384(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 368(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 352(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 336(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 320(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 304(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 288(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 272(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 256(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movapd %xmm2, 496(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 480(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 464(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 448(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 432(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 416(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 400(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 384(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 368(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 352(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 336(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 320(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 304(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 288(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 272(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 256(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: addq $2200, %rsp # imm = 0x898 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride5_vf64: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $3288, %rsp # imm = 0xCD8 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm3 = xmm13[0],xmm3[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0,1],xmm4[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm5[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm5[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1216(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1184(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm5[0,1,2],ymm4[3] |
| ; AVX1-ONLY-NEXT: vmovapd 1152(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1120(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm7 = xmm4[0],xmm7[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1536(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1504(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm7[0,1,2],ymm6[3] |
| ; AVX1-ONLY-NEXT: vmovapd 1472(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1440(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm9 = xmm6[0],xmm9[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1856(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1824(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm9[0,1,2],ymm8[3] |
| ; AVX1-ONLY-NEXT: vmovapd 1792(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 1760(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm11 = xmm8[0],xmm11[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2176(%rdi), %ymm10 |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2144(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm10[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 2112(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2080(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm12 = xmm10[0,1],xmm12[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2464(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm11[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 2432(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2400(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0,1],xmm12[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm12 = xmm12[0,1],xmm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 384(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = ymm12[0,1,2],ymm0[3] |
| ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm14 = xmm11[0],xmm0[1] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1344(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 1312(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1280(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 1632(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1600(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2336(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2304(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 2272(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2240(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm14 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm14[0],ymm2[3],ymm14[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 848(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm3[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm4[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1568(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1488(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm6[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1888(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm9[0],ymm0[0],ymm9[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1808(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm8[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2208(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2128(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2528(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2448(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm12[0],ymm0[0],ymm12[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm11[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 688(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1328(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm1 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[3],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1648(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpalignr $8, (%rsp), %xmm1, %xmm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2048(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm0[0],ymm3[0],ymm0[3],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1968(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2368(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm0[0],ymm6[0],ymm0[3],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2288(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm6[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm11 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm11 = xmm7[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm11[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 1136(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1536(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 1456(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 1776(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2176(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 2096(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 2416(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2336(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm0[0,1],xmm8[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8 |
| ; AVX1-ONLY-NEXT: vmovdqa 2256(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0,1,2,3],xmm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 |
| ; AVX1-ONLY-NEXT: vmovdqa 1936(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 1616(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm13[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovdqa 1296(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm0[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = xmm1[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = xmm3[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm8 |
| ; AVX1-ONLY-NEXT: vmovaps 656(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm5[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = xmm8[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm12 = xmm10[0,1],mem[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm12[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm12 = xmm9[0,1],xmm14[2,3] |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12 |
| ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm15 = xmm14[0,1,2,3],xmm15[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm15[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 |
| ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[0],ymm12[0],ymm9[3],ymm12[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm12 = xmm14[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[3],ymm9[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm10[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm6 |
| ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 |
| ; AVX1-ONLY-NEXT: vmovapd 768(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[3],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 704(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 928(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 864(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, (%rsp) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[3],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 1248(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 |
| ; AVX1-ONLY-NEXT: vmovapd 1408(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1344(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm4[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 1568(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm14[0],ymm0[3],ymm14[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1504(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 1728(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm12[0],ymm0[3],ymm12[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1664(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 1888(%rdi), %ymm10 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[3],ymm10[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1824(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 2048(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[3],ymm8[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 1984(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 2208(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[3],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2144(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 2368(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[3],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2304(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: vmovapd 2528(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[3],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovdqa 2464(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm14[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm13 = mem[0,1,2,3],xmm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm12[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm11 = mem[0,1,2,3],xmm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm10[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1,2,3],xmm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm8[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = mem[0,1,2,3],xmm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm6[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm5 = mem[0,1,2,3],xmm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm4[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,2,3],xmm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm2[3] |
| ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%r8) |
| ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm0, 480(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm3, 448(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm5, 416(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm7, 384(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm9, 352(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm11, 320(%r9) |
| ; AVX1-ONLY-NEXT: vmovapd %ymm13, 288(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm15, 256(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9) |
| ; AVX1-ONLY-NEXT: addq $3288, %rsp # imm = 0xCD8 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
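| ; For vf64, the AVX2 path below deinterleaves with vpblendd/vpalignr/vpermq, |
| ; spilling intermediates to a 3240-byte stack frame (subq $3240, %rsp). |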
| ; AVX2-ONLY-LABEL: load_i64_stride5_vf64: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $3240, %rsp # imm = 0xCA8 |
| ; AVX2-ONLY-NEXT: vmovdqa 896(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 832(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 800(%rdi), %xmm11 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm11[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1216(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1120(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm12[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1536(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1504(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1472(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1440(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1856(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1824(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1792(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1760(%rdi), %xmm14 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2176(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2144(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2112(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2080(%rdi), %xmm15 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2496(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2464(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 2432(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2400(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm9[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 736(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 672(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 640(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm1[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1056(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 992(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 960(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm2[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1376(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1344(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1312(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1280(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm3[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1696(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1664(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1632(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1600(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm4[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2016(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1984(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1952(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1920(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm7 = xmm5[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2336(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2304(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm6[0,1,2,3,4,5],ymm0[6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2272(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2240(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm6[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm10[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 848(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm11[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 928(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vpalignr $8, (%rsp), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, %ymm10 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm12[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1248(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1488(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm13[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1568(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1808(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm14[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1888(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2128(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm15[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2208(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, %ymm15 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2448(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2528(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, %ymm12 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = mem[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm8, %ymm11 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 688(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 768(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, %ymm9 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1088(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm8 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1328(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1408(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm7 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1648(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 1728(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm4 |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 1968(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2048(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqa 2288(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqa 2368(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2080(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 2176(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2400(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 2496(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2240(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 2336(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] |
| ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm11 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1344(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1504(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1664(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1824(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 1984(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 2144(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 2304(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm15 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] |
| ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm0[0,1,0,3] |
| ; AVX2-ONLY-NEXT: vmovdqa 2464(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3] |
| ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm12 = mem[0,1],xmm12[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm12 = mem[0,1,2,3,4,5],ymm12[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm11 = mem[0,1],xmm11[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm12 = mem[0,1,2,3,4,5],ymm12[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm13 = mem[0,1],xmm13[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm12 = mem[0,1,2,3,4,5],ymm12[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm10 = mem[0,1],xmm10[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm12 = mem[0,1,2,3,4,5],ymm12[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm9 = mem[0,1],xmm9[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm9[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm9 = mem[0,1,2,3,4,5],ymm9[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm8 = mem[0,1],xmm8[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm8 = mem[0,1,2,3,4,5],ymm8[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm7 = mem[0,1],xmm7[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = mem[0,1,2,3,4,5],ymm7[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm6 = mem[0,1],xmm6[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm6[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm5 = mem[0,1],xmm5[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm6 = mem[0,1,2,3,4,5],ymm6[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm4 = mem[0,1],xmm4[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm4 = mem[0,1,2,3,4,5],ymm4[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm3 = mem[0,1],xmm3[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm3 = mem[0,1,2,3,4,5],ymm3[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm2 = mem[0,1],xmm2[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm2 = mem[0,1,2,3,4,5],ymm2[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,2,3,4,5],ymm1[6,7] |
| ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3] |
| ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
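| ; The reassembled rows are then written out through %rsi, %rdx, %rcx, %r8 and |
| ; %r9, one 512-byte result block per output pointer. |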
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 480(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 448(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm7, 416(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 384(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm6, 352(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 320(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm9, 288(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm14, 256(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm12, 224(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm15, 192(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm10, 160(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm13, 128(%r9) |
| ; AVX2-ONLY-NEXT: vmovdqa %ymm11, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%r9) |
| ; AVX2-ONLY-NEXT: addq $3240, %rsp # imm = 0xCA8 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride5_vf64: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: subq $3336, %rsp # imm = 0xD08 |
| ; AVX512F-NEXT: vmovdqa64 1728(%rdi), %zmm16 |
| ; AVX512F-NEXT: vmovdqa64 1792(%rdi), %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 1408(%rdi), %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm7 |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [12,1,6,0,12,1,6,0] |
| ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm9, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm9, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm9, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm9, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm10 |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm9, %zmm10 |
| ; AVX512F-NEXT: vmovdqu64 %zmm10, (%rsp) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [5,10,15,0,5,10,15,0] |
| ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm7, %zmm10, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm10, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm10, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm10, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm10, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [6,11,0,1,6,11,0,1] |
| ; AVX512F-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm7, %zmm11, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm11, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm11, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm11, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm11, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [7,12,0,2,7,12,0,2] |
| ; AVX512F-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm7, %zmm12, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,0,11,0,5,0,11] |
| ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm7 |
| ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm12, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm3, %zmm6 |
| ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm2, %zmm3, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm3, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm3, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm18 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm9, %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm10, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm11, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm3, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 2048(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 2112(%rdi), %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm19 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm9, %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 2368(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 2432(%rdi), %zmm6 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm6, %zmm9 |
| ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm10, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm6, %zmm1, %zmm10 |
| ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm6, %zmm1, %zmm11 |
| ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm12, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm6, %zmm1, %zmm12 |
| ; AVX512F-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm6 |
| ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm3, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm31 |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,10,15] |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm31, %zmm1, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm28 = <1,6,11,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm31, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm31, %zmm8, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = <11,0,5,u> |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm15 = <12,1,6,u> |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm31 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm27 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm17 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm1, %zmm17 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm8, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm27 |
| ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm26 |
| ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm1, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm8, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm26 |
| ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm30 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm14 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm1, %zmm14 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm8, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm30 |
| ; AVX512F-NEXT: vmovdqa64 1664(%rdi), %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 1600(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm1, %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm8, %zmm25 |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 1344(%rdi), %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 1280(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm1, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm6 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm28, %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm22 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm8, %zmm22 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm23 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm11, %zmm23 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 1984(%rdi), %zmm10 |
| ; AVX512F-NEXT: vmovdqa64 1920(%rdi), %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm29 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm1, %zmm29 |
| ; AVX512F-NEXT: vmovdqa64 2304(%rdi), %zmm7 |
| ; AVX512F-NEXT: vmovdqa64 2240(%rdi), %zmm0 |
| ; AVX512F-NEXT: vpermi2q %zmm7, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm16 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm28, %zmm16 |
| ; AVX512F-NEXT: vpermi2q %zmm7, %zmm0, %zmm28 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm20 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm8, %zmm20 |
| ; AVX512F-NEXT: vpermi2q %zmm7, %zmm0, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm21 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm11, %zmm21 |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm7, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm7 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm15, %zmm10 |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm9 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm9 = zmm5[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm2 = zmm17[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm14 = zmm14[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm18[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 $228, (%rsp), %zmm12, %zmm5 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm5 = zmm12[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm29[0,1,2,3],zmm19[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-NEXT: # zmm0 = zmm1[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm29 |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,6,11] |
| ; AVX512F-NEXT: vpermt2q %zmm29, %zmm0, %zmm9 |
| ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm18 |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm0, %zmm14 |
| ; AVX512F-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm14 |
| ; AVX512F-NEXT: vpermt2q %zmm14, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1536(%rdi), %zmm17 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1856(%rdi), %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm0, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 2176(%rdi), %zmm19 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 2496(%rdi), %zmm9 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: movb $7, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm5 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm16 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm2 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm6 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm28 = [0,1,2,3,4,5,6,12] |
| ; AVX512F-NEXT: vpermt2q %zmm29, %zmm28, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm28, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm28, %zmm16 |
| ; AVX512F-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm14, %zmm28, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm28, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm28, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm28, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm16 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm28, %zmm6 |
| ; AVX512F-NEXT: movb $56, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm20 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,8,13] |
| ; AVX512F-NEXT: vpermt2q %zmm29, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm20 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm0, %zmm25 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm0, %zmm22 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm0, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm21 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm11 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,9,14] |
| ; AVX512F-NEXT: vpermt2q %zmm29, %zmm0, %zmm28 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm21 |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm0, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm0, %zmm23 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm0, %zmm11 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm31 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,10,15] |
| ; AVX512F-NEXT: vpermt2q %zmm29, %zmm0, %zmm31 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm27 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm15, %zmm0, %zmm27 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm26 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm14, %zmm0, %zmm26 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm30 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm18, %zmm0, %zmm30 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm24 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm0, %zmm24 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm13 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm0, %zmm13 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm0, %zmm7 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm10 {%k1} |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm10 |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 448(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 384(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512F-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, 448(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, 256(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 320(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 128(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 192(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, (%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 64(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 384(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, 448(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, 256(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm25, 320(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, 384(%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, 448(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm23, 256(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, 320(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, 128(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, 192(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm28, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, 64(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm21, 384(%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, 384(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, 448(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, 256(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, 320(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, 128(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm26, 192(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, (%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, 64(%r9) |
| ; AVX512F-NEXT: addq $3336, %rsp # imm = 0xD08 |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride5_vf64: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: subq $3336, %rsp # imm = 0xD08 |
| ; AVX512BW-NEXT: vmovdqa64 1728(%rdi), %zmm16 |
| ; AVX512BW-NEXT: vmovdqa64 1792(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm7 |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [12,1,6,0,12,1,6,0] |
| ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm9, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm9, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm9, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm9, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm10 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm9, %zmm10 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm10, (%rsp) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [5,10,15,0,5,10,15,0] |
| ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm10, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm10, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm10, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm10, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm10, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [6,11,0,1,6,11,0,1] |
| ; AVX512BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm11, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm11, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm11, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm11, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [7,12,0,2,7,12,0,2] |
| ; AVX512BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm12, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,0,11,0,5,0,11] |
| ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm12, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm3, %zmm6 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm3, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm3, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm3, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm18 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm9, %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm10, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm11, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm3, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 2048(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 2112(%rdi), %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm19 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm9, %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 2368(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 2432(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm6, %zmm9 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm10, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm1, %zmm10 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm1, %zmm11 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm12, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm1, %zmm12 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm6 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm3, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm31 |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,10,15] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm1, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = <1,6,11,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm8, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = <11,0,5,u> |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm15 = <12,1,6,u> |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm31 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm27 |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm17 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm1, %zmm17 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm8, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm27 |
| ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm26 |
| ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm1, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm8, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm26 |
| ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm30 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm14 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm1, %zmm14 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm8, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm30 |
| ; AVX512BW-NEXT: vmovdqa64 1664(%rdi), %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 1600(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm1, %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm8, %zmm25 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm6 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm28, %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm22 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm8, %zmm22 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm23 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm11, %zmm23 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 1984(%rdi), %zmm10 |
| ; AVX512BW-NEXT: vmovdqa64 1920(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm29 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm1, %zmm29 |
| ; AVX512BW-NEXT: vmovdqa64 2304(%rdi), %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 2240(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vpermi2q %zmm7, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm16 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm28, %zmm16 |
| ; AVX512BW-NEXT: vpermi2q %zmm7, %zmm0, %zmm28 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm20 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm8, %zmm20 |
| ; AVX512BW-NEXT: vpermi2q %zmm7, %zmm0, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm21 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm11, %zmm21 |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm7, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm7 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm15, %zmm10 |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm9 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm9 = zmm5[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm2 = zmm17[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm14 = zmm14[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm18[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 $228, (%rsp), %zmm12, %zmm5 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm5 = zmm12[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm29[0,1,2,3],zmm19[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload |
| ; AVX512BW-NEXT: # zmm0 = zmm1[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm29 |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,6,11] |
| ; AVX512BW-NEXT: vpermt2q %zmm29, %zmm0, %zmm9 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm18 |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm0, %zmm14 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm14 |
| ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, (%rsp) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1536(%rdi), %zmm17 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1856(%rdi), %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm0, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 2176(%rdi), %zmm19 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 2496(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: movb $7, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm5 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm16 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm6 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm28 = [0,1,2,3,4,5,6,12] |
| ; AVX512BW-NEXT: vpermt2q %zmm29, %zmm28, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm28, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm28, %zmm16 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm28, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm28, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm28, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm28, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm16 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm28, %zmm6 |
| ; AVX512BW-NEXT: movb $56, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm20 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,8,13] |
| ; AVX512BW-NEXT: vpermt2q %zmm29, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm20 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm0, %zmm25 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm0, %zmm22 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm0, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm21 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm11 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,9,14] |
| ; AVX512BW-NEXT: vpermt2q %zmm29, %zmm0, %zmm28 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm21 |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm0, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm0, %zmm23 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm0, %zmm11 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm31 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,10,15] |
| ; AVX512BW-NEXT: vpermt2q %zmm29, %zmm0, %zmm31 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm27 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm0, %zmm27 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm26 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm0, %zmm26 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm30 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm0, %zmm30 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm24 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm0, %zmm24 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm13 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm0, %zmm13 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm0, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm10 {%k1} |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm10 |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 448(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 384(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512BW-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, 448(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, 256(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 320(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 128(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 192(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, (%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 64(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 384(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, 448(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, 256(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm25, 320(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, 384(%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, 448(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm23, 256(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, 320(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, 192(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm28, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, 64(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm21, 384(%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, 384(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, 448(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, 256(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, 320(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, 128(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm26, 192(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, (%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, 64(%r9) |
| ; AVX512BW-NEXT: addq $3336, %rsp # imm = 0xD08 |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
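| ; Deinterleave a stride-5 load: %wide.vec holds 320 i64 elements |
| ; (5 interleaved fields x 64 iterations); each shufflevector below selects |
| ; every 5th element, starting at offsets 0 through 4, recovering the five |
| ; 64-element strided streams. |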
| %wide.vec = load <320 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155, i32 160, i32 165, i32 170, i32 175, i32 180, i32 185, i32 190, i32 195, i32 200, i32 205, i32 210, i32 215, i32 220, i32 225, i32 230, i32 235, i32 240, i32 245, i32 250, i32 255, i32 260, i32 265, i32 270, i32 275, i32 280, i32 285, i32 290, i32 295, i32 300, i32 305, i32 310, i32 315> |
| %strided.vec1 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156, i32 161, i32 166, i32 171, i32 176, i32 181, i32 186, i32 191, i32 196, i32 201, i32 206, i32 211, i32 216, i32 221, i32 226, i32 231, i32 236, i32 241, i32 246, i32 251, i32 256, i32 261, i32 266, i32 271, i32 276, i32 281, i32 286, i32 291, i32 296, i32 301, i32 306, i32 311, i32 316> |
| %strided.vec2 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157, i32 162, i32 167, i32 172, i32 177, i32 182, i32 187, i32 192, i32 197, i32 202, i32 207, i32 212, i32 217, i32 222, i32 227, i32 232, i32 237, i32 242, i32 247, i32 252, i32 257, i32 262, i32 267, i32 272, i32 277, i32 282, i32 287, i32 292, i32 297, i32 302, i32 307, i32 312, i32 317> |
| %strided.vec3 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158, i32 163, i32 168, i32 173, i32 178, i32 183, i32 188, i32 193, i32 198, i32 203, i32 208, i32 213, i32 218, i32 223, i32 228, i32 233, i32 238, i32 243, i32 248, i32 253, i32 258, i32 263, i32 268, i32 273, i32 278, i32 283, i32 288, i32 293, i32 298, i32 303, i32 308, i32 313, i32 318> |
| %strided.vec4 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159, i32 164, i32 169, i32 174, i32 179, i32 184, i32 189, i32 194, i32 199, i32 204, i32 209, i32 214, i32 219, i32 224, i32 229, i32 234, i32 239, i32 244, i32 249, i32 254, i32 259, i32 264, i32 269, i32 274, i32 279, i32 284, i32 289, i32 294, i32 299, i32 304, i32 309, i32 314, i32 319> |
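| ; Each deinterleaved 64 x i64 stream is stored contiguously to its own |
| ; output pointer. |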
| store <64 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <64 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <64 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <64 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <64 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX: {{.*}} |
| ; AVX1: {{.*}} |
| ; AVX2: {{.*}} |
| ; AVX2-FAST: {{.*}} |
| ; AVX2-FAST-PERLANE: {{.*}} |
| ; AVX2-SLOW: {{.*}} |
| ; AVX512BW-ONLY-FAST: {{.*}} |
| ; AVX512BW-ONLY-SLOW: {{.*}} |
| ; AVX512DQ-FAST: {{.*}} |
| ; AVX512DQ-SLOW: {{.*}} |
| ; AVX512DQBW-FAST: {{.*}} |
| ; AVX512DQBW-SLOW: {{.*}} |
| ; AVX512F-ONLY-FAST: {{.*}} |
| ; AVX512F-ONLY-SLOW: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |