| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12 |
| |
; These patterns are produced by the LoopVectorizer for interleaved loads.
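;
; Each @load_i64_stride8_vfN function loads N*8 contiguous i64 elements and
; de-interleaves them into eight N-element strided results, i.e. the
; vectorized form of a scalar loop such as the following (illustrative C
; sketch only, not part of the test):
;
;   for (int i = 0; i < N; ++i)
;     for (int j = 0; j < 8; ++j)
;       out[j][i] = in[8 * i + j];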
| |
| define void @load_i64_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf2: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; SSE-NEXT: movaps 112(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 96(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 80(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm3 |
| ; SSE-NEXT: movaps (%rdi), %xmm4 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm5 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm7 |
| ; SSE-NEXT: movaps %xmm4, %xmm8 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm3[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1] |
| ; SSE-NEXT: movaps %xmm5, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm6, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm7, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm8, (%rsi) |
| ; SSE-NEXT: movaps %xmm4, (%rdx) |
| ; SSE-NEXT: movaps %xmm3, (%rcx) |
| ; SSE-NEXT: movaps %xmm5, (%r8) |
| ; SSE-NEXT: movaps %xmm2, (%r9) |
| ; SSE-NEXT: movaps %xmm6, (%r11) |
| ; SSE-NEXT: movaps %xmm1, (%r10) |
| ; SSE-NEXT: movaps %xmm7, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf2: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm7[0],xmm5[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, (%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%r11) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%r10) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rax) |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf2: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm7 = xmm4[0],xmm6[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm3[1],ymm2[1],ymm3[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm5[0],xmm3[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm3[1] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, (%rdx) |
| ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm6, (%rcx) |
| ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm2, (%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, (%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%r11) |
| ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm5, (%r10) |
| ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm0, (%rax) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i64_stride8_vf2: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: vmovaps 64(%rdi), %xmm0 |
| ; AVX512-NEXT: vmovaps (%rdi), %xmm1 |
| ; AVX512-NEXT: vmovaps 16(%rdi), %xmm2 |
| ; AVX512-NEXT: vmovaps 32(%rdi), %xmm3 |
| ; AVX512-NEXT: vmovaps 48(%rdi), %xmm4 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm5 = xmm1[0],xmm0[0] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX512-NEXT: vmovaps 80(%rdi), %xmm1 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm1[0] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1] |
| ; AVX512-NEXT: vmovaps 96(%rdi), %xmm2 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm2[0] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX512-NEXT: vmovaps 112(%rdi), %xmm3 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm8 = xmm4[0],xmm3[0] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm4[1],xmm3[1] |
| ; AVX512-NEXT: vmovaps %xmm5, (%rsi) |
| ; AVX512-NEXT: vmovaps %xmm0, (%rdx) |
| ; AVX512-NEXT: vmovaps %xmm6, (%rcx) |
| ; AVX512-NEXT: vmovaps %xmm1, (%r8) |
| ; AVX512-NEXT: vmovaps %xmm7, (%r9) |
| ; AVX512-NEXT: vmovaps %xmm2, (%r11) |
| ; AVX512-NEXT: vmovaps %xmm8, (%r10) |
| ; AVX512-NEXT: vmovaps %xmm3, (%rax) |
| ; AVX512-NEXT: retq |
| %wide.vec = load <16 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 0, i32 8> |
| %strided.vec1 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 1, i32 9> |
| %strided.vec2 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 2, i32 10> |
| %strided.vec3 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 3, i32 11> |
| %strided.vec4 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 4, i32 12> |
| %strided.vec5 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 5, i32 13> |
| %strided.vec6 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 6, i32 14> |
| %strided.vec7 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <2 x i32> <i32 7, i32 15> |
| store <2 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <2 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <2 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <2 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <2 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <2 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <2 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <2 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| |
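; vf4 splits a 256-byte <32 x i64> load into eight <4 x i64> results. Note
; that the AVX512 lowering below moves from plain unpacks to vpermi2q with
; small constant index vectors (e.g. [4,12]) that select qwords across a pair
; of zmm registers.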
| define void @load_i64_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf4: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movaps 112(%rdi), %xmm5 |
| ; SSE-NEXT: movaps 240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 96(%rdi), %xmm8 |
| ; SSE-NEXT: movaps 224(%rdi), %xmm10 |
| ; SSE-NEXT: movaps 160(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 80(%rdi), %xmm12 |
| ; SSE-NEXT: movaps 208(%rdi), %xmm13 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm14 |
| ; SSE-NEXT: movaps (%rdi), %xmm7 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm3 |
| ; SSE-NEXT: movaps 192(%rdi), %xmm15 |
| ; SSE-NEXT: movaps 128(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, %xmm11 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm15[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm15[1] |
| ; SSE-NEXT: movaps %xmm7, %xmm15 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm14[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm14[1] |
| ; SSE-NEXT: movaps %xmm2, %xmm14 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm13[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm13[1] |
| ; SSE-NEXT: movaps %xmm6, %xmm13 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm12[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm12[1] |
| ; SSE-NEXT: movaps %xmm0, %xmm12 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm10[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm4, %xmm10 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm8[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm8[1] |
| ; SSE-NEXT: movaps %xmm3, %xmm8 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm5[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm5[1] |
| ; SSE-NEXT: movaps 176(%rdi), %xmm5 |
| ; SSE-NEXT: movaps %xmm5, %xmm1 |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm15, (%rsi) |
| ; SSE-NEXT: movaps %xmm11, 16(%rsi) |
| ; SSE-NEXT: movaps %xmm7, (%rdx) |
| ; SSE-NEXT: movaps %xmm9, 16(%rdx) |
| ; SSE-NEXT: movaps %xmm13, (%rcx) |
| ; SSE-NEXT: movaps %xmm14, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm6, (%r8) |
| ; SSE-NEXT: movaps %xmm2, 16(%r8) |
| ; SSE-NEXT: movaps %xmm10, (%r9) |
| ; SSE-NEXT: movaps %xmm12, 16(%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm4, (%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm8, (%rax) |
| ; SSE-NEXT: movaps %xmm1, 16(%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm5, 16(%rax) |
| ; SSE-NEXT: movaps %xmm3, (%rax) |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf4: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm6[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm8[0],xmm7[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm8[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm11 = xmm10[0],xmm8[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm4[1],ymm5[3],ymm4[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm10[1],xmm8[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm8[0],xmm5[0] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm12[0],xmm11[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm8[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm14[0],xmm12[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm15[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm14[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm9, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 16(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 16(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm13, (%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm10, 16(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%r11) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 16(%r11) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm11, (%r10) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf4: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm9[0],xmm8[0] |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm11 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm12 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm14 = xmm11[0],xmm13[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm9[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm11[1],xmm13[1] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm7[0],ymm6[0],ymm7[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm5[0],ymm4[0],ymm5[2],ymm4[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm13[2,3],ymm11[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm7[1],ymm6[1],ymm7[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm4[1],ymm5[3],ymm4[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm6[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm13 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm12[0],xmm13[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm6[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm12[1],xmm13[1] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm13[2,3],ymm12[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm3[1],ymm2[1],ymm3[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm14, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, 16(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm9, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, 16(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm11, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm15, (%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, 16(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, (%r11) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm5, 16(%r11) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm12, (%r10) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512-LABEL: load_i64_stride8_vf4: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3 |
| ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm4 |
| ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm5 |
| ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm6 |
| ; AVX512-NEXT: vmovaps 192(%rdi), %xmm0 |
| ; AVX512-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm7 = xmm1[0],xmm0[0] |
| ; AVX512-NEXT: vmovaps (%rdi), %xmm2 |
| ; AVX512-NEXT: vmovaps 64(%rdi), %xmm8 |
| ; AVX512-NEXT: vmovlhps {{.*#+}} xmm9 = xmm2[0],xmm8[0] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm0[1] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm2[1],xmm8[1] |
| ; AVX512-NEXT: vmovaps 192(%rdi), %ymm1 |
| ; AVX512-NEXT: vmovaps 128(%rdi), %ymm2 |
| ; AVX512-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX512-NEXT: vmovaps 64(%rdi), %ymm11 |
| ; AVX512-NEXT: vmovaps (%rdi), %ymm12 |
| ; AVX512-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm12[0],ymm11[0],ymm12[2],ymm11[2] |
| ; AVX512-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3] |
| ; AVX512-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm12[1],ymm11[1],ymm12[3],ymm11[3] |
| ; AVX512-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3] |
| ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,12,4,12] |
| ; AVX512-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512-NEXT: vpermi2q %zmm5, %zmm6, %zmm2 |
| ; AVX512-NEXT: vmovdqa {{.*#+}} xmm11 = [4,12] |
| ; AVX512-NEXT: vpermi2q %zmm3, %zmm4, %zmm11 |
| ; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [5,13,5,13] |
| ; AVX512-NEXT: # ymm11 = mem[0,1,0,1] |
| ; AVX512-NEXT: vpermi2q %zmm5, %zmm6, %zmm11 |
| ; AVX512-NEXT: vmovdqa {{.*#+}} xmm12 = [5,13] |
| ; AVX512-NEXT: vpermi2q %zmm3, %zmm4, %zmm12 |
| ; AVX512-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [6,14,6,14] |
| ; AVX512-NEXT: # ymm12 = mem[0,1,0,1] |
| ; AVX512-NEXT: vpermi2q %zmm5, %zmm6, %zmm12 |
| ; AVX512-NEXT: vmovdqa {{.*#+}} xmm13 = [6,14] |
| ; AVX512-NEXT: vpermi2q %zmm3, %zmm4, %zmm13 |
| ; AVX512-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [7,15,7,15] |
| ; AVX512-NEXT: # ymm13 = mem[0,1,0,1] |
| ; AVX512-NEXT: vpermi2q %zmm5, %zmm6, %zmm13 |
| ; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [7,15] |
| ; AVX512-NEXT: vpermi2q %zmm3, %zmm4, %zmm5 |
| ; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX512-NEXT: vmovaps %xmm9, (%rsi) |
| ; AVX512-NEXT: vmovaps %xmm7, 16(%rsi) |
| ; AVX512-NEXT: vmovaps %xmm8, (%rdx) |
| ; AVX512-NEXT: vmovaps %xmm10, 16(%rdx) |
| ; AVX512-NEXT: vmovaps %ymm0, (%rcx) |
| ; AVX512-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX512-NEXT: vmovdqa %ymm2, (%r9) |
| ; AVX512-NEXT: vmovdqa %ymm11, (%r11) |
| ; AVX512-NEXT: vmovdqa %ymm12, (%r10) |
| ; AVX512-NEXT: vmovdqa %ymm3, (%rax) |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| %wide.vec = load <32 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24> |
| %strided.vec1 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 1, i32 9, i32 17, i32 25> |
| %strided.vec2 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 2, i32 10, i32 18, i32 26> |
| %strided.vec3 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 3, i32 11, i32 19, i32 27> |
| %strided.vec4 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 4, i32 12, i32 20, i32 28> |
| %strided.vec5 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 5, i32 13, i32 21, i32 29> |
| %strided.vec6 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 6, i32 14, i32 22, i32 30> |
| %strided.vec7 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 7, i32 15, i32 23, i32 31> |
| store <4 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <4 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <4 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <4 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <4 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <4 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <4 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <4 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| |
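; vf8 handles a 512-byte input (eight <8 x i64> results). The SSE and
; AVX1/AVX2 lowerings run out of registers and spill intermediates to a local
; stack frame (subq $152 / $184, %rsp), while the AVX512 lowerings stay in
; zmm registers and merge the two source halves under the %k1 mask
; (movb $-64, %al).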
| define void @load_i64_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf8: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $152, %rsp |
| ; SSE-NEXT: movaps 336(%rdi), %xmm11 |
| ; SSE-NEXT: movaps 464(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 400(%rdi), %xmm7 |
| ; SSE-NEXT: movaps 80(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 208(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm8 |
| ; SSE-NEXT: movaps 320(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 256(%rdi), %xmm10 |
| ; SSE-NEXT: movaps 448(%rdi), %xmm3 |
| ; SSE-NEXT: movaps 384(%rdi), %xmm12 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm4 |
| ; SSE-NEXT: movaps (%rdi), %xmm13 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm9 |
| ; SSE-NEXT: movaps 192(%rdi), %xmm5 |
| ; SSE-NEXT: movaps 128(%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, %xmm15 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm5[0] |
| ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm5[1] |
| ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm13, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm12, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm3[0] |
| ; SSE-NEXT: movaps %xmm4, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm10, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm8, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm9, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm7, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm6[1] |
| ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 272(%rdi), %xmm15 |
| ; SSE-NEXT: movaps %xmm15, %xmm0 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm11[0] |
| ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm11[1] |
| ; SSE-NEXT: movaps 96(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm12 |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1] |
| ; SSE-NEXT: movaps 224(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 160(%rdi), %xmm13 |
| ; SSE-NEXT: movaps %xmm13, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm0[1] |
| ; SSE-NEXT: movaps 352(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 288(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, %xmm11 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1] |
| ; SSE-NEXT: movaps 480(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 416(%rdi), %xmm4 |
| ; SSE-NEXT: movaps %xmm4, %xmm8 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1] |
| ; SSE-NEXT: movaps 112(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, %xmm14 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm1[1] |
| ; SSE-NEXT: movaps 240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 176(%rdi), %xmm6 |
| ; SSE-NEXT: movaps %xmm6, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm0[1] |
| ; SSE-NEXT: movaps 368(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 304(%rdi), %xmm3 |
| ; SSE-NEXT: movaps %xmm3, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1] |
| ; SSE-NEXT: movaps 496(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 432(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rsi) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps %xmm15, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps %xmm11, 32(%r9) |
| ; SSE-NEXT: movaps %xmm8, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm4, 48(%rax) |
| ; SSE-NEXT: movaps %xmm9, 32(%rax) |
| ; SSE-NEXT: movaps %xmm13, 16(%rax) |
| ; SSE-NEXT: movaps %xmm12, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm2, 48(%rax) |
| ; SSE-NEXT: movaps %xmm5, 32(%rax) |
| ; SSE-NEXT: movaps %xmm7, 16(%rax) |
| ; SSE-NEXT: movaps %xmm14, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm1, 48(%rax) |
| ; SSE-NEXT: movaps %xmm3, 32(%rax) |
| ; SSE-NEXT: movaps %xmm6, 16(%rax) |
| ; SSE-NEXT: movaps %xmm10, (%rax) |
| ; SSE-NEXT: addq $152, %rsp |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf8: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $184, %rsp |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm8[0],xmm7[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm10[0],xmm9[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm12[0],xmm11[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm8[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm10[1],xmm9[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm11[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm14 = xmm13[0],xmm12[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm3[1],ymm2[1],ymm3[3],ymm2[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm11[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm13[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm2[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm5[0],ymm2[2],ymm5[2] |
| ; AVX1-ONLY-NEXT: vmovaps 368(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm1[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm6[0],ymm9[0],ymm6[2],ymm9[2] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm14 = xmm0[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm14[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm5[1],ymm2[3],ymm5[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm6[1],ymm9[1],ymm6[3],ymm9[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 16(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 48(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 16(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 48(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm10, 16(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm13, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 48(%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %xmm11, (%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %xmm15, 16(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, 48(%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm12, 32(%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rax) |
| ; AVX1-ONLY-NEXT: addq $184, %rsp |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf8: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $184, %rsp |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm10[0],xmm5[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm8[0],xmm7[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm10[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm6[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm8[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm15[0],ymm14[0],ymm15[2],ymm14[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm13[0],ymm9[0],ymm13[2],ymm9[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm12[0],ymm11[0],ymm12[2],ymm11[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm8[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm15[1],ymm14[1],ymm15[3],ymm14[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm13[1],ymm9[1],ymm13[3],ymm9[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm9[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm12[1],ymm11[1],ymm12[3],ymm11[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm0[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm2[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm2[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm2[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm2[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm5[0],ymm6[0],ymm5[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm9[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm2[0],ymm9[0],ymm2[2],ymm9[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm14[2,3],ymm7[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm4[1],ymm3[3],ymm4[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm9[1],ymm2[3],ymm9[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, (%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm15, 16(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm12, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, (%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %xmm13, 16(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm7, (%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm11, 32(%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rax) |
| ; AVX2-ONLY-NEXT: addq $184, %rsp |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride8_vf8: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm9 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8] |
| ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm18 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm5, %zmm18 |
| ; AVX512F-NEXT: vpermi2q %zmm8, %zmm3, %zmm5 |
| ; AVX512F-NEXT: movb $-64, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [1,9,1,9,1,9,1,9] |
| ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm10, %zmm11 |
| ; AVX512F-NEXT: vpermi2q %zmm8, %zmm3, %zmm10 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10] |
| ; AVX512F-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm12, %zmm13 |
| ; AVX512F-NEXT: vpermi2q %zmm8, %zmm3, %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm12 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %ymm13 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %ymm14 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm13[0],ymm14[2],ymm13[2] |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %ymm16 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %ymm17 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm7 = ymm17[0],ymm16[0],ymm17[2],ymm16[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm15[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm7, %zmm12, %zmm12 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [3,11,3,11,3,11,3,11] |
| ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm7, %zmm15 |
| ; AVX512F-NEXT: vpermi2q %zmm8, %zmm3, %zmm7 |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm7 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm14[1],ymm13[1],ymm14[3],ymm13[3] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm17[1],ymm16[1],ymm17[3],ymm16[3] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm14[2,3],ymm13[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm13, %zmm7, %zmm13 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm14 = [4,12,4,12,4,12,4,12] |
| ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm7 = zmm3[0],zmm8[0],zmm3[2],zmm8[2],zmm3[4],zmm8[4],zmm3[6],zmm8[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm16 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm17 = zmm3[1],zmm8[1],zmm3[3],zmm8[3],zmm3[5],zmm8[5],zmm3[7],zmm8[7] |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,6,14,6,14,6,14] |
| ; AVX512F-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm19 |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm15, %zmm19 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm19 {%k1} = zmm9[0],zmm6[0],zmm9[2],zmm6[2],zmm9[4],zmm6[4],zmm9[6],zmm6[6] |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15] |
| ; AVX512F-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm8, %zmm20, %zmm3 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm9[1],zmm6[1],zmm9[3],zmm6[3],zmm9[5],zmm6[5],zmm9[7],zmm6[7] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm14, %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm7 {%k1} |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm2, %zmm14 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [4,12,4,12] |
| ; AVX512F-NEXT: # ymm8 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm8 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm8, %zmm7, %zmm7 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [5,13,5,13,5,13,5,13] |
| ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm8, %zmm16 |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm2, %zmm8 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13] |
| ; AVX512F-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqa64 %zmm18, %zmm5 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %xmm8 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %xmm9 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm14 = xmm9[0],xmm8[0] |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %xmm18 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %xmm21 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm22 = xmm18[0],xmm21[0] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm14, %ymm22, %ymm14 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm14, %zmm5, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm10 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm9[1],xmm8[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm18[1],xmm21[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8 |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rdi |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm8, %zmm10, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm17 {%k1} |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm6, %zmm17, %zmm6 |
| ; AVX512F-NEXT: vpermi2q %zmm4, %zmm2, %zmm15 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14] |
| ; AVX512F-NEXT: # ymm9 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm9 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm9 = ymm15[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm9, %zmm19, %zmm9 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm20, %zmm2 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15] |
| ; AVX512F-NEXT: # ymm4 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm4 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm3, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, (%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, (%r10) |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, (%rdi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, (%rax) |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride8_vf8: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8] |
| ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm18 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm5, %zmm18 |
| ; AVX512BW-NEXT: vpermi2q %zmm8, %zmm3, %zmm5 |
| ; AVX512BW-NEXT: movb $-64, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [1,9,1,9,1,9,1,9] |
| ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm10, %zmm11 |
| ; AVX512BW-NEXT: vpermi2q %zmm8, %zmm3, %zmm10 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10] |
| ; AVX512BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm12, %zmm13 |
| ; AVX512BW-NEXT: vpermi2q %zmm8, %zmm3, %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm12 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %ymm13 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm14 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm13[0],ymm14[2],ymm13[2] |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %ymm16 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm17 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm7 = ymm17[0],ymm16[0],ymm17[2],ymm16[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm15[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm7, %zmm12, %zmm12 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [3,11,3,11,3,11,3,11] |
| ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm7, %zmm15 |
| ; AVX512BW-NEXT: vpermi2q %zmm8, %zmm3, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm7 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm14[1],ymm13[1],ymm14[3],ymm13[3] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm17[1],ymm16[1],ymm17[3],ymm16[3] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm14[2,3],ymm13[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm13, %zmm7, %zmm13 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm14 = [4,12,4,12,4,12,4,12] |
| ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm7 = zmm3[0],zmm8[0],zmm3[2],zmm8[2],zmm3[4],zmm8[4],zmm3[6],zmm8[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm16 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm17 = zmm3[1],zmm8[1],zmm3[3],zmm8[3],zmm3[5],zmm8[5],zmm3[7],zmm8[7] |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,6,14,6,14,6,14] |
| ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm19 |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm15, %zmm19 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm19 {%k1} = zmm9[0],zmm6[0],zmm9[2],zmm6[2],zmm9[4],zmm6[4],zmm9[6],zmm6[6] |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15] |
| ; AVX512BW-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm20, %zmm3 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm9[1],zmm6[1],zmm9[3],zmm6[3],zmm9[5],zmm6[5],zmm9[7],zmm6[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm14, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm7 {%k1} |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm2, %zmm14 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [4,12,4,12] |
| ; AVX512BW-NEXT: # ymm8 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm8 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm8, %zmm7, %zmm7 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm8 = [5,13,5,13,5,13,5,13] |
| ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm8, %zmm16 |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm2, %zmm8 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13] |
| ; AVX512BW-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm6 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm5 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %xmm8 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %xmm9 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm14 = xmm9[0],xmm8[0] |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %xmm18 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %xmm21 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm22 = xmm18[0],xmm21[0] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm14, %ymm22, %ymm14 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm14, %zmm5, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm10 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm9[1],xmm8[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm18[1],xmm21[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8 |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rdi |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm8, %zmm10, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm17 {%k1} |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm6, %zmm17, %zmm6 |
| ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm2, %zmm15 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14] |
| ; AVX512BW-NEXT: # ymm9 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm9 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm15[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm9, %zmm19, %zmm9 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm20, %zmm2 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15] |
| ; AVX512BW-NEXT: # ymm4 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm3, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%r10) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%rdi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rax) |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <64 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56> |
| %strided.vec1 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57> |
| %strided.vec2 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58> |
| %strided.vec3 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59> |
| %strided.vec4 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60> |
| %strided.vec5 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61> |
| %strided.vec6 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62> |
| %strided.vec7 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <8 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63> |
| store <8 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <8 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <8 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <8 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <8 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <8 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <8 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <8 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| |
| define void @load_i64_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf16: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $664, %rsp # imm = 0x298 |
| ; SSE-NEXT: movaps 832(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 320(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 256(%rdi), %xmm8 |
| ; SSE-NEXT: movaps 960(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 896(%rdi), %xmm9 |
| ; SSE-NEXT: movaps 448(%rdi), %xmm3 |
| ; SSE-NEXT: movaps 384(%rdi), %xmm10 |
| ; SSE-NEXT: movaps 576(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 512(%rdi), %xmm11 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm5 |
| ; SSE-NEXT: movaps (%rdi), %xmm12 |
| ; SSE-NEXT: movaps 704(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 640(%rdi), %xmm13 |
| ; SSE-NEXT: movaps 192(%rdi), %xmm7 |
| ; SSE-NEXT: movaps 128(%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, %xmm15 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm7[0] |
| ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm7[1] |
| ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm13, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm6[0] |
| ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm6[1] |
| ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm12, %xmm6 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0] |
| ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm5[1] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm11, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm10, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm3[0] |
| ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm3[1] |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm9, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm8, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 768(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 80(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 208(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 336(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 272(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 464(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 400(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 592(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 528(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 720(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 656(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 848(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 784(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 976(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 912(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 96(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 224(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 160(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 352(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 288(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 480(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 416(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 608(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 544(%rdi), %xmm15 |
| ; SSE-NEXT: movaps %xmm15, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1] |
| ; SSE-NEXT: movaps 736(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 672(%rdi), %xmm12 |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1] |
| ; SSE-NEXT: movaps 864(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 800(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1] |
| ; SSE-NEXT: movaps 992(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 928(%rdi), %xmm7 |
| ; SSE-NEXT: movaps %xmm7, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1] |
| ; SSE-NEXT: movaps 112(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 176(%rdi), %xmm13 |
| ; SSE-NEXT: movaps %xmm13, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm0[1] |
| ; SSE-NEXT: movaps 368(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 304(%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm0[1] |
| ; SSE-NEXT: movaps 496(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 432(%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1] |
| ; SSE-NEXT: movaps 624(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 560(%rdi), %xmm5 |
| ; SSE-NEXT: movaps %xmm5, %xmm11 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1] |
| ; SSE-NEXT: movaps 752(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 688(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, %xmm8 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; SSE-NEXT: movaps 880(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 816(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm6 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps 1008(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 944(%rdi), %xmm3 |
| ; SSE-NEXT: movaps %xmm3, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r9) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm7, 112(%rax) |
| ; SSE-NEXT: movaps %xmm9, 96(%rax) |
| ; SSE-NEXT: movaps %xmm12, 80(%rax) |
| ; SSE-NEXT: movaps %xmm15, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm4, 112(%rax) |
| ; SSE-NEXT: movaps %xmm6, 96(%rax) |
| ; SSE-NEXT: movaps %xmm8, 80(%rax) |
| ; SSE-NEXT: movaps %xmm11, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm3, 112(%rax) |
| ; SSE-NEXT: movaps %xmm1, 96(%rax) |
| ; SSE-NEXT: movaps %xmm2, 80(%rax) |
| ; SSE-NEXT: movaps %xmm5, 64(%rax) |
| ; SSE-NEXT: movaps %xmm14, 48(%rax) |
| ; SSE-NEXT: movaps %xmm10, 32(%rax) |
| ; SSE-NEXT: movaps %xmm13, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: addq $664, %rsp # imm = 0x298 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf16: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $792, %rsp # imm = 0x318 |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm7[0],xmm6[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm9[0],xmm8[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm9[1],xmm8[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm5 = xmm4[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm5[0],ymm2[0],ymm5[2],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovaps 848(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm8[0],xmm7[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm12[0],xmm11[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm13 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm13[0],ymm10[0],ymm13[2],ymm10[2] |
| ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps 528(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm15[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm4[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm5[1],ymm2[1],ymm5[3],ymm2[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm8[1],xmm7[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm9[1],ymm6[1],ymm9[3],ymm6[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm12[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm13[1],ymm10[1],ymm13[3],ymm10[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm15[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm8[0],ymm11[0],ymm8[2],ymm11[2] |
| ; AVX1-ONLY-NEXT: vmovaps 368(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm9[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %ymm14 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm14[0],ymm0[0],ymm14[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 880(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm12[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm10 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm10[0],ymm2[2],ymm10[2] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm0[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm5[0],ymm6[0],ymm5[2],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovaps 624(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 560(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm3[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm13[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm10[1],ymm2[3],ymm10[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm8[1],ymm11[1],ymm8[3],ymm11[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm9[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm5[1],ymm6[1],ymm5[3],ymm6[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm14[1],mem[1],ymm14[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm3 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm3 = xmm12[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 16(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 80(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 112(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 48(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 16(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 80(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 112(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 48(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 80(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 16(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 112(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 48(%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 80(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, (%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 16(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 112(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, 48(%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm15, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rax) |
| ; AVX1-ONLY-NEXT: addq $792, %rsp # imm = 0x318 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf16: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $808, %rsp # imm = 0x328 |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm4[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm7[0],xmm6[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm9[0],xmm8[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm10[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm9[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm2[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm7[0],ymm6[0],ymm7[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm5[0],ymm4[0],ymm5[2],ymm4[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3],ymm8[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm11[0],ymm10[0],ymm11[2],ymm10[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm9[0],ymm8[0],ymm9[2],ymm8[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm12[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm15[0],ymm14[0],ymm15[2],ymm14[2] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm12[0],ymm13[2],ymm12[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[2,3],mem[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm3[1],ymm2[1],ymm3[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm6[1],ymm7[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm5[1],ymm4[1],ymm5[3],ymm4[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm11[1],ymm10[1],ymm11[3],ymm10[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm9[1],ymm8[1],ymm9[3],ymm8[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm15[1],ymm14[1],ymm15[3],ymm14[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm13[1],ymm12[1],ymm13[3],ymm12[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm8[0],ymm10[0],ymm8[2],ymm10[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm15[0],ymm0[0],ymm15[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm9[0],ymm7[2],ymm9[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm11[0],ymm1[0],ymm11[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm6[0],ymm13[0],ymm6[2],ymm13[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm14[2,3],ymm12[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm6[1],ymm13[1],ymm6[3],ymm13[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm8[1],ymm10[1],ymm8[3],ymm10[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm3 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm3[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm4[1],ymm5[1],ymm4[3],ymm5[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm9[1],ymm7[3],ymm9[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm3 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 16(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 80(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 112(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 48(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 16(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 80(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 112(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 48(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 80(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 16(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps (%rsp), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 112(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 48(%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 80(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, (%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 16(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 112(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, 48(%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm12, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm13, (%rax) |
| ; AVX2-ONLY-NEXT: addq $808, %rsp # imm = 0x328 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride8_vf16: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: subq $280, %rsp # imm = 0x118 |
| ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm30 |
| ; AVX512F-NEXT: vmovaps 512(%rdi), %zmm0 |
| ; AVX512F-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm16 |
| ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm17 |
| ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm7 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm10 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm12 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8] |
| ; AVX512F-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm18, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm18, %zmm13 |
| ; AVX512F-NEXT: movb $-64, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm13 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %xmm1 |
| ; AVX512F-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %xmm31 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm31[0],xmm1[0] |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %xmm23 |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %xmm21 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm14 = xmm23[0],xmm21[0] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm14, %ymm3 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm3, %zmm13, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm18, %zmm3 |
| ; AVX512F-NEXT: vpermi2q %zmm16, %zmm8, %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm18 {%k1} |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10] |
| ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm3, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm3, %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %ymm14 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm0[0],ymm14[2],ymm0[2] |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %ymm22 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %ymm24 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm24[0],ymm22[0],ymm24[2],ymm22[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm13, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm3, %zmm1 |
| ; AVX512F-NEXT: vpermi2q %zmm16, %zmm8, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa 704(%rdi), %ymm1 |
| ; AVX512F-NEXT: vmovdqa 640(%rdi), %ymm15 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm13 = ymm15[0],ymm1[0],ymm15[2],ymm1[2] |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %ymm26 |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %ymm28 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm26[0],ymm28[2],ymm26[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm13[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm6, %zmm3, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm29 = [3,11,3,11,3,11,3,11] |
| ; AVX512F-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm29, %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm29, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm25 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm14[1],ymm0[1],ymm14[3],ymm0[3] |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm13 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm24[1],ymm22[1],ymm24[3],ymm22[3] |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm27 |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm3 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm29, %zmm0 |
| ; AVX512F-NEXT: vpermi2q %zmm16, %zmm8, %zmm29 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm29 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm15[1],ymm1[1],ymm15[3],ymm1[3] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm28[1],ymm26[1],ymm28[3],ymm26[3] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm29, %zmm22 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm0, %zmm5 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm1 = zmm7[0],zmm11[0],zmm7[2],zmm11[2],zmm7[4],zmm11[4],zmm7[6],zmm11[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,12,4,12] |
| ; AVX512F-NEXT: # ymm5 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm14 |
| ; AVX512F-NEXT: vpermt2q %zmm25, %zmm5, %zmm14 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm4, %zmm1, %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm8[0],zmm16[0],zmm8[2],zmm16[2],zmm8[4],zmm16[4],zmm8[6],zmm16[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-NEXT: vpermi2q %zmm30, %zmm6, %zmm0 |
| ; AVX512F-NEXT: vpermi2q %zmm19, %zmm2, %zmm5 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm26 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,5,13,5,13,5,13] |
| ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm14, %zmm1 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [5,13,5,13] |
| ; AVX512F-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm25, %zmm2, %zmm4 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm28 = [1,9,1,9,1,9,1,9] |
| ; AVX512F-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm29 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm28, %zmm29 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14] |
| ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm5, %zmm4 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm15 = zmm7[1],zmm11[1],zmm7[3],zmm11[3],zmm7[5],zmm11[5],zmm7[7],zmm11[7] |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,7,15,7,15,7,15] |
| ; AVX512F-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm1, %zmm7 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm12[0],zmm10[0],zmm12[2],zmm10[2],zmm12[4],zmm10[4],zmm12[6],zmm10[6] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm7 {%k1} = zmm12[1],zmm10[1],zmm12[3],zmm10[3],zmm12[5],zmm10[5],zmm12[7],zmm10[7] |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm28, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm10, %zmm14, %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm15 {%k1} |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm15, %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm10 |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm28, %zmm10 |
| ; AVX512F-NEXT: vpermi2q %zmm16, %zmm8, %zmm28 |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm5, %zmm12 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm15 = zmm8[1],zmm16[1],zmm8[3],zmm16[3],zmm8[5],zmm16[5],zmm8[7],zmm16[7] |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm1, %zmm8 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm12 {%k1} = zmm9[0],zmm17[0],zmm9[2],zmm17[2],zmm9[4],zmm17[4],zmm9[6],zmm17[6] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm8 {%k1} = zmm9[1],zmm17[1],zmm9[3],zmm17[3],zmm9[5],zmm17[5],zmm9[7],zmm17[7] |
| ; AVX512F-NEXT: vpermt2q %zmm17, %zmm14, %zmm9 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm15 {%k1} |
| ; AVX512F-NEXT: vpermi2q %zmm30, %zmm6, %zmm14 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm16 |
| ; AVX512F-NEXT: vpermi2q %zmm19, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm2, %zmm15, %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm9 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm5, %zmm9 |
| ; AVX512F-NEXT: vpermt2q %zmm27, %zmm1, %zmm3 |
| ; AVX512F-NEXT: vpermi2q %zmm30, %zmm6, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm1, %zmm6 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512F-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm14 |
| ; AVX512F-NEXT: vpermt2q %zmm25, %zmm1, %zmm14 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [7,15,7,15] |
| ; AVX512F-NEXT: # ymm15 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermt2q %zmm25, %zmm15, %zmm13 |
| ; AVX512F-NEXT: vpermi2q %zmm16, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm15, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa 704(%rdi), %xmm15 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %xmm16 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm17 = xmm16[0],xmm15[0] |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %xmm25 |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %xmm27 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm27[0],xmm25[0] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm17, %ymm30, %ymm17 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm17, %zmm18, %zmm17 |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm29 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm31, %xmm11 # 16-byte Folded Reload |
| ; AVX512F-NEXT: # xmm11 = xmm31[1],mem[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm18 = xmm23[1],xmm21[1] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm11, %ymm18, %ymm11 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm11, %zmm29, %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm28 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm16[1],xmm15[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm15 = xmm27[1],xmm25[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm10, %ymm15, %ymm10 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm10, %zmm28, %zmm10 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm9, %zmm4, %zmm4 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm3, %zmm7, %zmm3 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm5, %zmm8, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %zmm17, 64(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, 64(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, (%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 64(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%rcx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, 64(%r8) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%r8) |
| ; AVX512F-NEXT: vmovdqa64 %zmm26, 64(%r9) |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, (%r9) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa64 %zmm19, 64(%rax) |
| ; AVX512F-NEXT: vmovdqa64 %zmm20, (%rax) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, 64(%rax) |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, (%rax) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, 64(%rax) |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, (%rax) |
| ; AVX512F-NEXT: addq $280, %rsp # imm = 0x118 |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride8_vf16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: subq $280, %rsp # imm = 0x118 |
| ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm30 |
| ; AVX512BW-NEXT: vmovaps 512(%rdi), %zmm0 |
| ; AVX512BW-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm16 |
| ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm17 |
| ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm10 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm12 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8] |
| ; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm18, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm18, %zmm13 |
| ; AVX512BW-NEXT: movb $-64, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm13 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %xmm1 |
| ; AVX512BW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %xmm31 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm31[0],xmm1[0] |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %xmm23 |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %xmm21 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm14 = xmm23[0],xmm21[0] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm3, %ymm14, %ymm3 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm3, %zmm13, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm18, %zmm3 |
| ; AVX512BW-NEXT: vpermi2q %zmm16, %zmm8, %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm18 {%k1} |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10] |
| ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm3, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm3, %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm14 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm0[0],ymm14[2],ymm0[2] |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %ymm22 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm24 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm24[0],ymm22[0],ymm24[2],ymm22[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm13, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm3, %zmm1 |
| ; AVX512BW-NEXT: vpermi2q %zmm16, %zmm8, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 704(%rdi), %ymm1 |
| ; AVX512BW-NEXT: vmovdqa 640(%rdi), %ymm15 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm13 = ymm15[0],ymm1[0],ymm15[2],ymm1[2] |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %ymm26 |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %ymm28 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm26[0],ymm28[2],ymm26[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm13[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm6, %zmm3, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm29 = [3,11,3,11,3,11,3,11] |
| ; AVX512BW-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm29, %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm29, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm25 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm14[1],ymm0[1],ymm14[3],ymm0[3] |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm13 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm24[1],ymm22[1],ymm24[3],ymm22[3] |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm27 |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm3 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm29, %zmm0 |
| ; AVX512BW-NEXT: vpermi2q %zmm16, %zmm8, %zmm29 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm29 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm15[1],ymm1[1],ymm15[3],ymm1[3] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm28[1],ymm26[1],ymm28[3],ymm26[3] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm29, %zmm22 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm0, %zmm5 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm1 = zmm7[0],zmm11[0],zmm7[2],zmm11[2],zmm7[4],zmm11[4],zmm7[6],zmm11[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [4,12,4,12] |
| ; AVX512BW-NEXT: # ymm5 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm14 |
| ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm5, %zmm14 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm4, %zmm1, %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm8[0],zmm16[0],zmm8[2],zmm16[2],zmm8[4],zmm16[4],zmm8[6],zmm16[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermi2q %zmm30, %zmm6, %zmm0 |
| ; AVX512BW-NEXT: vpermi2q %zmm19, %zmm2, %zmm5 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm26 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,5,13,5,13,5,13] |
| ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm14, %zmm1 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [5,13,5,13] |
| ; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm2, %zmm4 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm28 = [1,9,1,9,1,9,1,9] |
| ; AVX512BW-NEXT: # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm29 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm28, %zmm29 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14] |
| ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm5, %zmm4 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm15 = zmm7[1],zmm11[1],zmm7[3],zmm11[3],zmm7[5],zmm11[5],zmm7[7],zmm11[7] |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,7,15,7,15,7,15] |
| ; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm1, %zmm7 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm12[0],zmm10[0],zmm12[2],zmm10[2],zmm12[4],zmm10[4],zmm12[6],zmm10[6] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm7 {%k1} = zmm12[1],zmm10[1],zmm12[3],zmm10[3],zmm12[5],zmm10[5],zmm12[7],zmm10[7] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm28, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm10, %zmm14, %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm15 {%k1} |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm15, %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm10 |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm28, %zmm10 |
| ; AVX512BW-NEXT: vpermi2q %zmm16, %zmm8, %zmm28 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm5, %zmm12 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm15 = zmm8[1],zmm16[1],zmm8[3],zmm16[3],zmm8[5],zmm16[5],zmm8[7],zmm16[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm1, %zmm8 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm12 {%k1} = zmm9[0],zmm17[0],zmm9[2],zmm17[2],zmm9[4],zmm17[4],zmm9[6],zmm17[6] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm8 {%k1} = zmm9[1],zmm17[1],zmm9[3],zmm17[3],zmm9[5],zmm17[5],zmm9[7],zmm17[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm14, %zmm9 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm15 {%k1} |
| ; AVX512BW-NEXT: vpermi2q %zmm30, %zmm6, %zmm14 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm16 |
| ; AVX512BW-NEXT: vpermi2q %zmm19, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm2, %zmm15, %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm9 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm5, %zmm9 |
| ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm1, %zmm3 |
| ; AVX512BW-NEXT: vpermi2q %zmm30, %zmm6, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm1, %zmm6 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm14 |
| ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm1, %zmm14 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [7,15,7,15] |
| ; AVX512BW-NEXT: # ymm15 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm15, %zmm13 |
| ; AVX512BW-NEXT: vpermi2q %zmm16, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm15, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa 704(%rdi), %xmm15 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %xmm16 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm17 = xmm16[0],xmm15[0] |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %xmm25 |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %xmm27 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm27[0],xmm25[0] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm17, %ymm30, %ymm17 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm17, %zmm18, %zmm17 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm29 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm31, %xmm11 # 16-byte Folded Reload |
| ; AVX512BW-NEXT: # xmm11 = xmm31[1],mem[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm18 = xmm23[1],xmm21[1] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm11, %ymm18, %ymm11 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm11, %zmm29, %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm28 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm16[1],xmm15[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm15 = xmm27[1],xmm25[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm10, %ymm15, %ymm10 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm10, %zmm28, %zmm10 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm9, %zmm4, %zmm4 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm3, %zmm7, %zmm3 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm5, %zmm8, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm17, 64(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, 64(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, (%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 64(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%rcx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%r8) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%r8) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm26, 64(%r9) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, (%r9) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa64 %zmm19, 64(%rax) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm20, (%rax) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rax) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, 64(%rax) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rax) |
| ; AVX512BW-NEXT: addq $280, %rsp # imm = 0x118 |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
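| ; The shufflevector masks below pull lane k from every 8-element group: |
| ; result k gathers indices k, k+8, k+16, ..., k+120 of the wide load. |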
| %wide.vec = load <128 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120> |
| %strided.vec1 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121> |
| %strided.vec2 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58, i32 66, i32 74, i32 82, i32 90, i32 98, i32 106, i32 114, i32 122> |
| %strided.vec3 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59, i32 67, i32 75, i32 83, i32 91, i32 99, i32 107, i32 115, i32 123> |
| %strided.vec4 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60, i32 68, i32 76, i32 84, i32 92, i32 100, i32 108, i32 116, i32 124> |
| %strided.vec5 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125> |
| %strided.vec6 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126> |
| %strided.vec7 = shufflevector <128 x i64> %wide.vec, <128 x i64> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127> |
| store <16 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <16 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <16 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <16 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <16 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <16 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <16 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <16 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| |
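| ; vf32 variant: the same stride-8 deinterleave, with a <256 x i64> wide load split into eight <32 x i64> results. |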
| define void @load_i64_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf32: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $1688, %rsp # imm = 0x698 |
| ; SSE-NEXT: movaps 832(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 320(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 256(%rdi), %xmm8 |
| ; SSE-NEXT: movaps 960(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 896(%rdi), %xmm10 |
| ; SSE-NEXT: movaps 448(%rdi), %xmm3 |
| ; SSE-NEXT: movaps 384(%rdi), %xmm9 |
| ; SSE-NEXT: movaps 576(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 512(%rdi), %xmm12 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm5 |
| ; SSE-NEXT: movaps (%rdi), %xmm11 |
| ; SSE-NEXT: movaps 704(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 640(%rdi), %xmm14 |
| ; SSE-NEXT: movaps 192(%rdi), %xmm7 |
| ; SSE-NEXT: movaps 128(%rdi), %xmm13 |
| ; SSE-NEXT: movaps %xmm13, %xmm15 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm7[0] |
| ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm7[1] |
| ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm11, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm5[0] |
| ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm5[1] |
| ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm9, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm3[0] |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm3[1] |
| ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm8, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm14, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm6[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm6[1] |
| ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm10, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 768(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1216(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1152(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1088(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1024(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1472(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1408(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1344(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1280(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1728(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1664(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1600(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1536(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1984(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1920(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1856(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1792(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 80(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 208(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 336(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 272(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 464(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 400(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 592(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 528(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 720(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 656(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 848(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 784(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 976(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 912(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1104(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1040(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1232(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1168(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1360(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1296(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1488(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1424(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1616(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1552(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1744(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1680(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1872(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1808(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2000(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1936(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 96(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 224(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 160(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 352(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 288(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 480(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 416(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 608(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 544(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 736(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 672(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 864(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 800(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 992(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 928(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1120(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1056(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1248(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1184(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1376(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1312(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1504(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1440(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1632(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1568(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1760(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1696(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1888(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1824(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2016(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1952(%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1] |
| ; SSE-NEXT: movaps 112(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 176(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 368(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 304(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 496(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 432(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 624(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 560(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 752(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 688(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 880(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 816(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1008(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 944(%rdi), %xmm15 |
| ; SSE-NEXT: movaps %xmm15, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1] |
| ; SSE-NEXT: movaps 1136(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1072(%rdi), %xmm12 |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1] |
| ; SSE-NEXT: movaps 1264(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1200(%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm0[1] |
| ; SSE-NEXT: movaps 1392(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1328(%rdi), %xmm6 |
| ; SSE-NEXT: movaps %xmm6, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm0[1] |
| ; SSE-NEXT: movaps 1520(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1456(%rdi), %xmm8 |
| ; SSE-NEXT: movaps %xmm8, %xmm13 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm0[1] |
| ; SSE-NEXT: movaps 1648(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1584(%rdi), %xmm7 |
| ; SSE-NEXT: movaps %xmm7, %xmm11 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1] |
| ; SSE-NEXT: movaps 1776(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1712(%rdi), %xmm2 |
| ; SSE-NEXT: movaps %xmm2, %xmm9 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; SSE-NEXT: movaps 1904(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1840(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps 2032(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1968(%rdi), %xmm3 |
| ; SSE-NEXT: movaps %xmm3, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%r9) |
| ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm14, 240(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm4, 240(%rax) |
| ; SSE-NEXT: movaps %xmm5, 224(%rax) |
| ; SSE-NEXT: movaps %xmm9, 208(%rax) |
| ; SSE-NEXT: movaps %xmm11, 192(%rax) |
| ; SSE-NEXT: movaps %xmm13, 176(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm3, 240(%rax) |
| ; SSE-NEXT: movaps %xmm1, 224(%rax) |
| ; SSE-NEXT: movaps %xmm2, 208(%rax) |
| ; SSE-NEXT: movaps %xmm7, 192(%rax) |
| ; SSE-NEXT: movaps %xmm8, 176(%rax) |
| ; SSE-NEXT: movaps %xmm6, 160(%rax) |
| ; SSE-NEXT: movaps %xmm10, 144(%rax) |
| ; SSE-NEXT: movaps %xmm12, 128(%rax) |
| ; SSE-NEXT: movaps %xmm15, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: addq $1688, %rsp # imm = 0x698 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf32: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $2296, %rsp # imm = 0x8F8 |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm7[0],xmm6[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm5[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm11[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm11[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm9[0],xmm8[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1600(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm9[1],xmm8[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1536(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1472(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1344(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1280(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1792(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1616(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1552(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm13 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm0[0],ymm13[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm11[0],xmm14[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 528(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm10[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %ymm15 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm15[0],ymm0[0],ymm15[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1104(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1040(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1872(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1808(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1472(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1360(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1296(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm9 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm6[0],ymm9[2],ymm6[2] |
| ; AVX1-ONLY-NEXT: vmovaps 848(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm4[0],xmm5[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm11[1],xmm14[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm10 = xmm10[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm7[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm7 = xmm7[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm8[1],ymm3[1],ymm8[3],ymm3[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm9[1],ymm6[1],ymm9[3],ymm6[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm5[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1120(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1504(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1440(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1312(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1632(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1568(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1760(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1888(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1824(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm2 |
| ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2] |
| ; AVX1-ONLY-NEXT: vmovaps 368(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm2[0],xmm3[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX1-ONLY-NEXT: vmovaps 624(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 560(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovaps 880(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm4[0],xmm11[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %ymm5 |
| ; AVX1-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1136(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovaps 1072(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm5[0],xmm12[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1504(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1440(%rdi), %ymm6 |
| ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1392(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vmovaps 1328(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm14 = xmm6[0],xmm13[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1760(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1648(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovaps 1584(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm7[0],xmm14[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1904(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovaps 1840(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm8[0],xmm15[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm2[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm11[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm12[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm13[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm14[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm8[1],xmm15[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 240(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 224(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 160(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 48(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 176(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 112(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 208(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 192(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 128(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 144(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 80(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 16(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 224(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 240(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 160(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 176(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 112(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 48(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 192(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 208(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 16(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 80(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 128(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 144(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 240(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 224(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 48(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 160(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 176(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 112(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 16(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, (%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 192(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 208(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 128(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 144(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 80(%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 240(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 208(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, (%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 16(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 80(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 176(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 144(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 112(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm5, 48(%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm10, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm9, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX1-ONLY-NEXT: addq $2296, %rsp # imm = 0x8F8 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf32: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $2408, %rsp # imm = 0x968 |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm4[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm7[0],xmm5[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovaps 1024(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm10[0],xmm9[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm10[1],xmm9[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm4[0],xmm8[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1728(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm7[1],xmm5[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1664(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm8[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1408(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1984(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1792(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1728(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1664(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3],ymm8[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm12[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1024(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm15[0],ymm14[0],ymm15[2],ymm14[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1792(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1984(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vmovaps 1408(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm8[0],ymm10[0],ymm8[2],ymm10[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[2],ymm12[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm6[0],ymm7[0],ymm6[2],ymm7[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm9[2,3],ymm2[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm9[0],ymm2[0],ymm9[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm13[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm5 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm13[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm5 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm13[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm15[1],ymm14[1],ymm15[3],ymm14[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm13[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm5 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm13[2,3],ymm5[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm9[1],ymm2[1],ymm9[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm3[1],ymm4[1],ymm3[3],ymm4[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm7[1],ymm6[3],ymm7[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm8[1],ymm10[1],ymm8[3],ymm10[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm11[1],ymm12[1],ymm11[3],ymm12[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1504(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1312(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1632(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1568(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1888(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1824(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1952(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %ymm15 |
| ; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm14[0],ymm15[0],ymm14[2],ymm15[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 1312(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 1504(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm10[0],ymm11[0],ymm10[2],ymm11[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm12[0],ymm13[0],ymm12[2],ymm13[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1632(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 1568(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm6[0],ymm5[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm8[0],ymm9[0],ymm8[2],ymm9[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1888(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps 1824(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovaps 1952(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = ymm4[1],mem[1],ymm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm4 = ymm4[1],mem[1],ymm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm7[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm4 = ymm4[1],mem[1],ymm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm4 = ymm4[1],mem[1],ymm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm4 = ymm4[1],mem[1],ymm4[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm14[1],ymm15[1],ymm14[3],ymm15[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm10[1],ymm11[1],ymm10[3],ymm11[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm12[1],ymm13[1],ymm12[3],ymm13[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm9[1],ymm8[3],ymm9[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm4[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm5, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm10, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm14, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX2-ONLY-NEXT: addq $2408, %rsp # imm = 0x968 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-LABEL: load_i64_stride8_vf32: |
| ; AVX512F: # %bb.0: |
| ; AVX512F-NEXT: subq $2568, %rsp # imm = 0xA08 |
| ; AVX512F-NEXT: vmovdqa64 1856(%rdi), %zmm21 |
| ; AVX512F-NEXT: vmovdqa64 1984(%rdi), %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 1344(%rdi), %zmm19 |
| ; AVX512F-NEXT: vmovdqa64 1280(%rdi), %zmm31 |
| ; AVX512F-NEXT: vmovdqa64 1472(%rdi), %zmm26 |
| ; AVX512F-NEXT: vmovdqa64 1408(%rdi), %zmm27 |
| ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm22 |
| ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm9 |
| ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm30 |
| ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm29 |
| ; AVX512F-NEXT: movb $-64, %al |
| ; AVX512F-NEXT: kmovw %eax, %k1 |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10] |
| ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512F-NEXT: vmovdqa 1152(%rdi), %ymm1 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm28[0],ymm1[2],ymm28[2] |
| ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %ymm25 |
| ; AVX512F-NEXT: vmovdqa 1024(%rdi), %ymm5 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm7 = ymm5[0],ymm25[0],ymm5[2],ymm25[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm4, %zmm2, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm22, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm4 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 704(%rdi), %ymm23 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %ymm17 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm8 = ymm17[0],ymm23[0],ymm17[2],ymm23[2] |
| ; AVX512F-NEXT: vmovdqa 576(%rdi), %ymm2 |
| ; AVX512F-NEXT: vmovdqa 512(%rdi), %ymm10 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm2[0],ymm10[2],ymm2[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm11[2,3],ymm8[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm8, %zmm4, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm20, %zmm0, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm7 |
| ; AVX512F-NEXT: vpermt2q %zmm12, %zmm0, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %ymm9 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %ymm11 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm12 = ymm11[0],ymm9[0],ymm11[2],ymm9[2] |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %ymm4 |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %ymm16 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm16[0],ymm4[0],ymm16[2],ymm4[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm15[2,3],ymm12[2,3] |
| ; AVX512F-NEXT: vmovdqa64 1920(%rdi), %zmm13 |
| ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm12, %zmm8, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm8 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm0, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 1792(%rdi), %zmm12 |
| ; AVX512F-NEXT: vmovdqu64 %zmm12, (%rsp) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm21, %zmm12, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512F-NEXT: vmovdqa 1728(%rdi), %ymm14 |
| ; AVX512F-NEXT: vmovdqa 1664(%rdi), %ymm12 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm12[0],ymm14[0],ymm12[2],ymm14[2] |
| ; AVX512F-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512F-NEXT: vmovdqa 1536(%rdi), %ymm8 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} ymm13 = ymm8[0],ymm18[0],ymm8[2],ymm18[2] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm15[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm13, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11] |
| ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm0, %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm0, %zmm15 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm15 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],ymm28[1],ymm1[3],ymm28[3] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm5[1],ymm25[1],ymm5[3],ymm25[3] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm15, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm13 |
| ; AVX512F-NEXT: vpermt2q %zmm22, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm17[1],ymm23[1],ymm17[3],ymm23[3] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm2 = ymm10[1],ymm2[1],ymm10[3],ymm2[3] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm3, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm20, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm11[1],ymm9[1],ymm11[3],ymm9[3] |
| ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm9 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm16[1],ymm4[1],ymm16[3],ymm4[3] |
| ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm10 |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3] |
| ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm11 |
| ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm2, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm23 # 64-byte Reload |
| ; AVX512F-NEXT: vpermi2q %zmm21, %zmm23, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm4 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm14[1],ymm12[3],ymm14[3] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} ymm2 = ymm8[1],ymm18[1],ymm8[3],ymm18[3] |
| ; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm0, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm0, %zmm1 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm31[0],zmm19[0],zmm31[2],zmm19[2],zmm31[4],zmm19[4],zmm31[6],zmm19[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm12 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm11, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12] |
| ; AVX512F-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm4 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm14 |
| ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm1, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm15[0],zmm13[0],zmm15[2],zmm13[2],zmm15[4],zmm13[4],zmm15[6],zmm13[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm16 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm13 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm1, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm20, %zmm0, %zmm29 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm6 |
| ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm6, %zmm1, %zmm4 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm4 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm23[0],zmm21[0],zmm23[2],zmm21[2],zmm23[4],zmm21[4],zmm23[6],zmm21[6] |
| ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 1600(%rdi), %zmm7 |
| ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm7, %zmm2, %zmm0 |
| ; AVX512F-NEXT: vmovdqa64 1728(%rdi), %zmm7 |
| ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 1664(%rdi), %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermi2q %zmm7, %zmm2, %zmm1 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm3, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8] |
| ; AVX512F-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm18, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9] |
| ; AVX512F-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm23, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14] |
| ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm3, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm8 = zmm31[1],zmm19[1],zmm31[3],zmm19[3],zmm31[5],zmm19[5],zmm31[7],zmm19[7] |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [7,15,7,15,7,15,7,15] |
| ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm10, %zmm31 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm1 {%k1} = zmm27[0],zmm26[0],zmm27[2],zmm26[2],zmm27[4],zmm26[4],zmm27[6],zmm26[6] |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm27[1],zmm26[1],zmm27[3],zmm26[3],zmm27[5],zmm26[5],zmm27[7],zmm26[7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm7 |
| ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm18, %zmm27 |
| ; AVX512F-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm23, %zmm7 |
| ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [5,13,5,13,5,13,5,13] |
| ; AVX512F-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-NEXT: vpermt2q %zmm26, %zmm2, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm2, %zmm12 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13] |
| ; AVX512F-NEXT: # ymm7 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm7, %zmm15 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm8, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm27 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm18, %zmm27 |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm22 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm23, %zmm22 |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm21 |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm3, %zmm21 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm14 = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7] |
| ; AVX512F-NEXT: vpermt2q %zmm16, %zmm10, %zmm11 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm21 {%k1} = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm16[1],zmm30[1],zmm16[3],zmm30[3],zmm16[5],zmm30[5],zmm16[7],zmm30[7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm29 |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm18, %zmm16 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm23, %zmm29 |
| ; AVX512F-NEXT: vpermt2q %zmm30, %zmm2, %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm2, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm5, %zmm7, %zmm15 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm31 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm18, %zmm31 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm20 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm23, %zmm20 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm26 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm26 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm9 = zmm5[1],zmm1[1],zmm5[3],zmm1[3],zmm5[5],zmm1[5],zmm5[7],zmm1[7] |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm10, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm26 {%k1} = zmm12[0],zmm28[0],zmm12[2],zmm28[2],zmm12[4],zmm28[4],zmm12[6],zmm28[6] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm12[1],zmm28[1],zmm12[3],zmm28[3],zmm12[5],zmm28[5],zmm12[7],zmm28[7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm24 |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm5 |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm18, %zmm12 |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm23, %zmm24 |
| ; AVX512F-NEXT: vpermt2q %zmm28, %zmm2, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm25 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm18, %zmm25 |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm6, %zmm18 |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm28 |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm23, %zmm28 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm6, %zmm23 |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm17 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm17 |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm8 = zmm6[1],zmm1[1],zmm6[3],zmm1[3],zmm6[5],zmm1[5],zmm6[7],zmm1[7] |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm10, %zmm6 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} zmm17 {%k1} = zmm30[0],zmm4[0],zmm30[2],zmm4[2],zmm30[4],zmm4[4],zmm30[6],zmm4[6] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm30[1],zmm4[1],zmm30[3],zmm4[3],zmm30[5],zmm4[5],zmm30[7],zmm4[7] |
| ; AVX512F-NEXT: vmovdqu64 %zmm6, (%rsp) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm4, %zmm2, %zmm30 |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm6 |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm3, %zmm6 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm10, %zmm13 |
| ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm2, %zmm3 |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm1 |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm0, %zmm10, %zmm4 |
| ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm4 |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm15, %zmm4 |
| ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm15 |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm10, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm19, %zmm10, %zmm13 |
| ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm14 {%k1} |
| ; AVX512F-NEXT: vinserti64x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm0 # 32-byte Folded Reload |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm9 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm5 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm7, %zmm5 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm9, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm30, %zmm8 {%k1} |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-NEXT: vpermi2q %zmm9, %zmm5, %zmm7 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm8, %zmm0 |
| ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14] |
| ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm0, %zmm3 |
| ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [7,15,7,15] |
| ; AVX512F-NEXT: # ymm7 = mem[0,1,0,1] |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm7, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm10, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm0, %zmm11 |
| ; AVX512F-NEXT: vpermt2q %zmm13, %zmm7, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm10 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm0, %zmm10 |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm2 |
| ; AVX512F-NEXT: vpermi2q %zmm9, %zmm5, %zmm0 |
| ; AVX512F-NEXT: vpermt2q %zmm9, %zmm7, %zmm2 |
| ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpermt2q %zmm1, %zmm7, %zmm8 |
| ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm4, %zmm1, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm3, %zmm21, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm31 {%k1} |
| ; AVX512F-NEXT: vmovdqa 192(%rdi), %xmm14 |
| ; AVX512F-NEXT: vmovdqa 128(%rdi), %xmm13 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm13[0],xmm14[0] |
| ; AVX512F-NEXT: vmovdqa (%rdi), %xmm10 |
| ; AVX512F-NEXT: vmovdqa 64(%rdi), %xmm7 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm21 = xmm10[0],xmm7[0] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm8, %ymm21, %ymm8 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm8, %zmm31, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqa64 %zmm16, %zmm27 {%k1} |
| ; AVX512F-NEXT: vmovdqa 704(%rdi), %xmm12 |
| ; AVX512F-NEXT: vmovdqa64 640(%rdi), %xmm21 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm21[0],xmm12[0] |
| ; AVX512F-NEXT: vmovdqa64 576(%rdi), %xmm31 |
| ; AVX512F-NEXT: vmovdqa 512(%rdi), %xmm5 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm5[0],xmm31[0] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm30, %ymm9, %ymm9 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm9, %zmm27, %zmm1 |
| ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %xmm19 |
| ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %xmm27 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm27[0],xmm19[0] |
| ; AVX512F-NEXT: vmovdqa 1088(%rdi), %xmm2 |
| ; AVX512F-NEXT: vmovdqa 1024(%rdi), %xmm1 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm1[0],xmm2[0] |
| ; AVX512F-NEXT: vinserti32x4 $1, %xmm30, %ymm4, %ymm4 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm8 |
| ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm18 {%k1} |
| ; AVX512F-NEXT: vmovdqa64 1728(%rdi), %xmm25 |
| ; AVX512F-NEXT: vmovdqa64 1664(%rdi), %xmm30 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm6 = xmm30[0],xmm25[0] |
| ; AVX512F-NEXT: vmovdqa 1600(%rdi), %xmm4 |
| ; AVX512F-NEXT: vmovdqa 1536(%rdi), %xmm3 |
| ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm3[0],xmm4[0] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm9, %ymm6 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm6, %zmm18, %zmm6 |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm16 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm27[1],xmm19[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm1, %ymm1 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm1, %zmm16, %zmm1 |
| ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm22 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm21[1],xmm12[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm31[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm2, %zmm22, %zmm2 |
| ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm20 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm13[1],xmm14[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm10[1],xmm7[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm5, %zmm20, %zmm5 |
| ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm23 {%k1} |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm30[1],xmm25[1] |
| ; AVX512F-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1] |
| ; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm3 |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm3, %zmm23, %zmm3 |
| ; AVX512F-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 32-byte Folded Reload |
| ; AVX512F-NEXT: # ymm4 = mem[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm4, %zmm26, %zmm4 |
| ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512F-NEXT: vinserti64x4 $0, %ymm0, %zmm17, %zmm0 |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX512F-NEXT: # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-NEXT: vinsertf64x4 $0, %ymm7, %zmm9, %zmm7 |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload |
| ; AVX512F-NEXT: # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-NEXT: vinsertf64x4 $0, %ymm9, %zmm10, %zmm9 |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload |
| ; AVX512F-NEXT: # ymm10 = mem[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX512F-NEXT: vmovups (%rsp), %zmm11 # 64-byte Reload |
| ; AVX512F-NEXT: vinsertf64x4 $0, %ymm10, %zmm11, %zmm10 |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512F-NEXT: # ymm11 = mem[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-NEXT: vinsertf64x4 $0, %ymm11, %zmm12, %zmm11 |
| ; AVX512F-NEXT: vmovdqa64 %zmm6, 192(%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm8, 128(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm6, 64(%rsi) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm6, (%rsi) |
| ; AVX512F-NEXT: vmovdqa64 %zmm3, 192(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm2, 64(%rdx) |
| ; AVX512F-NEXT: vmovdqa64 %zmm1, 128(%rdx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 192(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 64(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 128(%rcx) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 192(%r8) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%r8) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 64(%r8) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 128(%r8) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 192(%r9) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%r9) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 64(%r9) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 128(%r9) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 192(%rax) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, (%rax) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 64(%rax) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm1, 128(%rax) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovdqa64 %zmm0, 192(%rax) |
| ; AVX512F-NEXT: vmovdqa64 %zmm4, (%rax) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-NEXT: vmovaps %zmm11, 128(%rax) |
| ; AVX512F-NEXT: vmovaps %zmm10, 192(%rax) |
| ; AVX512F-NEXT: vmovaps %zmm9, (%rax) |
| ; AVX512F-NEXT: vmovaps %zmm7, 64(%rax) |
| ; AVX512F-NEXT: addq $2568, %rsp # imm = 0xA08 |
| ; AVX512F-NEXT: vzeroupper |
| ; AVX512F-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: load_i64_stride8_vf32: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: subq $2568, %rsp # imm = 0xA08 |
| ; AVX512BW-NEXT: vmovdqa64 1856(%rdi), %zmm21 |
| ; AVX512BW-NEXT: vmovdqa64 1984(%rdi), %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm19 |
| ; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm31 |
| ; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm26 |
| ; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm27 |
| ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm22 |
| ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm30 |
| ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm29 |
| ; AVX512BW-NEXT: movb $-64, %al |
| ; AVX512BW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10] |
| ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512BW-NEXT: vmovdqa 1152(%rdi), %ymm1 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm28[0],ymm1[2],ymm28[2] |
| ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %ymm25 |
| ; AVX512BW-NEXT: vmovdqa 1024(%rdi), %ymm5 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm7 = ymm5[0],ymm25[0],ymm5[2],ymm25[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm7[2,3],ymm4[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm4, %zmm2, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm4 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %ymm23 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %ymm17 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm8 = ymm17[0],ymm23[0],ymm17[2],ymm23[2] |
| ; AVX512BW-NEXT: vmovdqa 576(%rdi), %ymm2 |
| ; AVX512BW-NEXT: vmovdqa 512(%rdi), %ymm10 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm2[0],ymm10[2],ymm2[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm11[2,3],ymm8[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm8, %zmm4, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm0, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm7 |
| ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm0, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %ymm9 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm11 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm12 = ymm11[0],ymm9[0],ymm11[2],ymm9[2] |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %ymm4 |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %ymm16 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm16[0],ymm4[0],ymm16[2],ymm4[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm15[2,3],ymm12[2,3] |
| ; AVX512BW-NEXT: vmovdqa64 1920(%rdi), %zmm13 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm12, %zmm8, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm8 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm0, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 1792(%rdi), %zmm12 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm12, (%rsp) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm21, %zmm12, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 1728(%rdi), %ymm14 |
| ; AVX512BW-NEXT: vmovdqa 1664(%rdi), %ymm12 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm12[0],ymm14[0],ymm12[2],ymm14[2] |
| ; AVX512BW-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512BW-NEXT: vmovdqa 1536(%rdi), %ymm8 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} ymm13 = ymm8[0],ymm18[0],ymm8[2],ymm18[2] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm15[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm13, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11] |
| ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm0, %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm0, %zmm15 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm15 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],ymm28[1],ymm1[3],ymm28[3] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm5[1],ymm25[1],ymm5[3],ymm25[3] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm15, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm13 |
| ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm17[1],ymm23[1],ymm17[3],ymm23[3] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm2 = ymm10[1],ymm2[1],ymm10[3],ymm2[3] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm3, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm11[1],ymm9[1],ymm11[3],ymm9[3] |
| ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm9 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm16[1],ymm4[1],ymm16[3],ymm4[3] |
| ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm10 |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3] |
| ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm11 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm2, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm23 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermi2q %zmm21, %zmm23, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm14[1],ymm12[3],ymm14[3] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} ymm2 = ymm8[1],ymm18[1],ymm8[3],ymm18[3] |
| ; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm0, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm0, %zmm1 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm31[0],zmm19[0],zmm31[2],zmm19[2],zmm31[4],zmm19[4],zmm31[6],zmm19[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm12 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12] |
| ; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm4 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm14 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm15[0],zmm13[0],zmm15[2],zmm13[2],zmm15[4],zmm13[4],zmm15[6],zmm13[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm16 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm13 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm0, %zmm29 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm6 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm1, %zmm4 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm4 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm23[0],zmm21[0],zmm23[2],zmm21[2],zmm23[4],zmm21[4],zmm23[6],zmm21[6] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 1600(%rdi), %zmm7 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm7, %zmm2, %zmm0 |
| ; AVX512BW-NEXT: vmovdqa64 1728(%rdi), %zmm7 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 1664(%rdi), %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermi2q %zmm7, %zmm2, %zmm1 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm3, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8] |
| ; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm18, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9] |
| ; AVX512BW-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm23, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14] |
| ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm3, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm8 = zmm31[1],zmm19[1],zmm31[3],zmm19[3],zmm31[5],zmm19[5],zmm31[7],zmm19[7] |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm10 = [7,15,7,15,7,15,7,15] |
| ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm10, %zmm31 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm1 {%k1} = zmm27[0],zmm26[0],zmm27[2],zmm26[2],zmm27[4],zmm26[4],zmm27[6],zmm26[6] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm27[1],zmm26[1],zmm27[3],zmm26[3],zmm27[5],zmm26[5],zmm27[7],zmm26[7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm7 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm18, %zmm27 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm23, %zmm7 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [5,13,5,13,5,13,5,13] |
| ; AVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm2, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm2, %zmm12 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13] |
| ; AVX512BW-NEXT: # ymm7 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm7, %zmm15 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm8, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm27 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm18, %zmm27 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm22 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm23, %zmm22 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm21 |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm3, %zmm21 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm14 = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm10, %zmm11 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm21 {%k1} = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm16[1],zmm30[1],zmm16[3],zmm30[3],zmm16[5],zmm30[5],zmm16[7],zmm30[7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm29 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm18, %zmm16 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm23, %zmm29 |
| ; AVX512BW-NEXT: vpermt2q %zmm30, %zmm2, %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm2, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm7, %zmm15 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm31 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm18, %zmm31 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm20 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm23, %zmm20 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm26 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm26 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm9 = zmm5[1],zmm1[1],zmm5[3],zmm1[3],zmm5[5],zmm1[5],zmm5[7],zmm1[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm10, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm26 {%k1} = zmm12[0],zmm28[0],zmm12[2],zmm28[2],zmm12[4],zmm28[4],zmm12[6],zmm28[6] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm12[1],zmm28[1],zmm12[3],zmm28[3],zmm12[5],zmm28[5],zmm12[7],zmm28[7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm24 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm5 |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm18, %zmm12 |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm23, %zmm24 |
| ; AVX512BW-NEXT: vpermt2q %zmm28, %zmm2, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm25 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm18, %zmm25 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm6, %zmm18 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm28 |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm23, %zmm28 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm6, %zmm23 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm17 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm17 |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm8 = zmm6[1],zmm1[1],zmm6[3],zmm1[3],zmm6[5],zmm1[5],zmm6[7],zmm1[7] |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm10, %zmm6 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} zmm17 {%k1} = zmm30[0],zmm4[0],zmm30[2],zmm4[2],zmm30[4],zmm4[4],zmm30[6],zmm4[6] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm30[1],zmm4[1],zmm30[3],zmm4[3],zmm30[5],zmm4[5],zmm30[7],zmm4[7] |
| ; AVX512BW-NEXT: vmovdqu64 %zmm6, (%rsp) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm2, %zmm30 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm6 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm3, %zmm6 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm10, %zmm13 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm1 |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm10, %zmm4 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm4 |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm15, %zmm4 |
| ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm15 |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm10, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm10, %zmm13 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm14 {%k1} |
| ; AVX512BW-NEXT: vinserti64x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm0 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm9 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm5 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm7, %zmm5 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm9, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm8 {%k1} |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermi2q %zmm9, %zmm5, %zmm7 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm8, %zmm0 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14] |
| ; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm0, %zmm3 |
| ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [7,15,7,15] |
| ; AVX512BW-NEXT: # ymm7 = mem[0,1,0,1] |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm7, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm0, %zmm11 |
| ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm7, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm10 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm0, %zmm10 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2 |
| ; AVX512BW-NEXT: vpermi2q %zmm9, %zmm5, %zmm0 |
| ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm7, %zmm2 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm7, %zmm8 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm4, %zmm1, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm3, %zmm21, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm31 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 192(%rdi), %xmm14 |
| ; AVX512BW-NEXT: vmovdqa 128(%rdi), %xmm13 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm13[0],xmm14[0] |
| ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm10 |
| ; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm7 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm21 = xmm10[0],xmm7[0] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm8, %ymm21, %ymm8 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm8, %zmm31, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm27 {%k1} |
| ; AVX512BW-NEXT: vmovdqa 704(%rdi), %xmm12 |
| ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %xmm21 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm21[0],xmm12[0] |
| ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %xmm31 |
| ; AVX512BW-NEXT: vmovdqa 512(%rdi), %xmm5 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm5[0],xmm31[0] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm30, %ymm9, %ymm9 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm9, %zmm27, %zmm1 |
| ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %xmm19 |
| ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %xmm27 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm27[0],xmm19[0] |
| ; AVX512BW-NEXT: vmovdqa 1088(%rdi), %xmm2 |
| ; AVX512BW-NEXT: vmovdqa 1024(%rdi), %xmm1 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm1[0],xmm2[0] |
| ; AVX512BW-NEXT: vinserti32x4 $1, %xmm30, %ymm4, %ymm4 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm8 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm18 {%k1} |
| ; AVX512BW-NEXT: vmovdqa64 1728(%rdi), %xmm25 |
| ; AVX512BW-NEXT: vmovdqa64 1664(%rdi), %xmm30 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm6 = xmm30[0],xmm25[0] |
| ; AVX512BW-NEXT: vmovdqa 1600(%rdi), %xmm4 |
| ; AVX512BW-NEXT: vmovdqa 1536(%rdi), %xmm3 |
| ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm3[0],xmm4[0] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm6, %ymm9, %ymm6 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm6, %zmm18, %zmm6 |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm16 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm27[1],xmm19[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm1, %ymm1 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm1, %zmm16, %zmm1 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm22 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm21[1],xmm12[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm31[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm2, %zmm22, %zmm2 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm20 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm13[1],xmm14[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm10[1],xmm7[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm5, %zmm20, %zmm5 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm23 {%k1} |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm30[1],xmm25[1] |
| ; AVX512BW-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1] |
| ; AVX512BW-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm3 |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm3, %zmm23, %zmm3 |
| ; AVX512BW-NEXT: vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: # ymm4 = mem[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm4, %zmm26, %zmm4 |
| ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512BW-NEXT: vinserti64x4 $0, %ymm0, %zmm17, %zmm0 |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-NEXT: vinsertf64x4 $0, %ymm7, %zmm9, %zmm7 |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-NEXT: vinsertf64x4 $0, %ymm9, %zmm10, %zmm9 |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: # ymm10 = mem[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX512BW-NEXT: vmovups (%rsp), %zmm11 # 64-byte Reload |
| ; AVX512BW-NEXT: vinsertf64x4 $0, %ymm10, %zmm11, %zmm10 |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-NEXT: vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512BW-NEXT: # ymm11 = mem[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-NEXT: vinsertf64x4 $0, %ymm11, %zmm12, %zmm11 |
| ; AVX512BW-NEXT: vmovdqa64 %zmm6, 192(%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm8, 128(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm6, 64(%rsi) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm6, (%rsi) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rdx) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm1, 128(%rdx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 192(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 64(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 128(%rcx) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 192(%r8) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%r8) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 64(%r8) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 128(%r8) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 192(%r9) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%r9) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 64(%r9) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 128(%r9) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 192(%rax) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, (%rax) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 64(%rax) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm1, 128(%rax) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovdqa64 %zmm0, 192(%rax) |
| ; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rax) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-NEXT: vmovaps %zmm11, 128(%rax) |
| ; AVX512BW-NEXT: vmovaps %zmm10, 192(%rax) |
| ; AVX512BW-NEXT: vmovaps %zmm9, (%rax) |
| ; AVX512BW-NEXT: vmovaps %zmm7, 64(%rax) |
| ; AVX512BW-NEXT: addq $2568, %rsp # imm = 0xA08 |
| ; AVX512BW-NEXT: vzeroupper |
| ; AVX512BW-NEXT: retq |
| %wide.vec = load <256 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120, i32 128, i32 136, i32 144, i32 152, i32 160, i32 168, i32 176, i32 184, i32 192, i32 200, i32 208, i32 216, i32 224, i32 232, i32 240, i32 248> |
| %strided.vec1 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121, i32 129, i32 137, i32 145, i32 153, i32 161, i32 169, i32 177, i32 185, i32 193, i32 201, i32 209, i32 217, i32 225, i32 233, i32 241, i32 249> |
| %strided.vec2 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58, i32 66, i32 74, i32 82, i32 90, i32 98, i32 106, i32 114, i32 122, i32 130, i32 138, i32 146, i32 154, i32 162, i32 170, i32 178, i32 186, i32 194, i32 202, i32 210, i32 218, i32 226, i32 234, i32 242, i32 250> |
| %strided.vec3 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59, i32 67, i32 75, i32 83, i32 91, i32 99, i32 107, i32 115, i32 123, i32 131, i32 139, i32 147, i32 155, i32 163, i32 171, i32 179, i32 187, i32 195, i32 203, i32 211, i32 219, i32 227, i32 235, i32 243, i32 251> |
| %strided.vec4 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60, i32 68, i32 76, i32 84, i32 92, i32 100, i32 108, i32 116, i32 124, i32 132, i32 140, i32 148, i32 156, i32 164, i32 172, i32 180, i32 188, i32 196, i32 204, i32 212, i32 220, i32 228, i32 236, i32 244, i32 252> |
| %strided.vec5 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125, i32 133, i32 141, i32 149, i32 157, i32 165, i32 173, i32 181, i32 189, i32 197, i32 205, i32 213, i32 221, i32 229, i32 237, i32 245, i32 253> |
| %strided.vec6 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126, i32 134, i32 142, i32 150, i32 158, i32 166, i32 174, i32 182, i32 190, i32 198, i32 206, i32 214, i32 222, i32 230, i32 238, i32 246, i32 254> |
| %strided.vec7 = shufflevector <256 x i64> %wide.vec, <256 x i64> poison, <32 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127, i32 135, i32 143, i32 151, i32 159, i32 167, i32 175, i32 183, i32 191, i32 199, i32 207, i32 215, i32 223, i32 231, i32 239, i32 247, i32 255> |
| store <32 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <32 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <32 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <32 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <32 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <32 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <32 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <32 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| |
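| ; The vf64 variant below applies the same stride-8 deinterleave to a |
| ; <512 x i64> load, splitting it into eight 64-element output streams. |
| ; As an illustrative sketch only (a hypothetical vf4-sized reduction, not |
| ; one of the autogenerated tests in this file), stream j of a vfN case |
| ; gathers indices j, j+8, j+16, ...: |
| ; %wide.vec = load <32 x i64>, ptr %in.vec, align 64 |
| ; %strided.vec0 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24> |
| ; store <4 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| ; In the SSE lowering that follows, each movaps pulls the first two qwords |
| ; of an 8 x i64 group; movlhps pairs the low qwords of two consecutive |
| ; groups (stream 0) and unpckhpd the high qwords (stream 1), with the |
| ; results spilled to the stack until the final stores. |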
| define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind { |
| ; SSE-LABEL: load_i64_stride8_vf64: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: subq $3720, %rsp # imm = 0xE88 |
| ; SSE-NEXT: movaps 960(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 832(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 768(%rdi), %xmm8 |
| ; SSE-NEXT: movaps 704(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 640(%rdi), %xmm9 |
| ; SSE-NEXT: movaps 576(%rdi), %xmm3 |
| ; SSE-NEXT: movaps 512(%rdi), %xmm10 |
| ; SSE-NEXT: movaps 448(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 384(%rdi), %xmm11 |
| ; SSE-NEXT: movaps 320(%rdi), %xmm5 |
| ; SSE-NEXT: movaps 256(%rdi), %xmm12 |
| ; SSE-NEXT: movaps 192(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 128(%rdi), %xmm13 |
| ; SSE-NEXT: movaps 64(%rdi), %xmm7 |
| ; SSE-NEXT: movaps (%rdi), %xmm14 |
| ; SSE-NEXT: movaps %xmm14, %xmm15 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm7[0] |
| ; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm7[1] |
| ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm13, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm6[0] |
| ; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm6[1] |
| ; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm12, %xmm6 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0] |
| ; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm5[1] |
| ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm11, %xmm5 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0] |
| ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm10, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm3[0] |
| ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm3[1] |
| ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm9, %xmm3 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0] |
| ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm2[1] |
| ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps %xmm8, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm1[1] |
| ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 896(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1088(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1024(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1216(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1152(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1344(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1280(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1472(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1408(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1600(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1536(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1728(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1664(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1856(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1792(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1984(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1920(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2112(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2048(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2176(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2368(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2304(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2496(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2432(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2624(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2560(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2752(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2688(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2880(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2816(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3008(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2944(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3136(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3072(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3264(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3200(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3392(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3328(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3520(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3456(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3648(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3584(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3776(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3712(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3904(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3840(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 4032(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3968(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 80(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 16(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 208(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 144(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 336(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 272(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 464(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 400(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 592(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 528(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 720(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 656(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 848(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 784(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 976(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 912(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1104(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1040(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1232(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1168(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1360(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1296(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1488(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1424(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1616(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1552(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1744(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1680(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1872(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1808(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2000(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1936(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2128(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2064(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2256(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2192(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2384(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2320(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2512(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2448(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2640(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2576(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2768(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2704(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2896(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2832(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3024(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2960(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3152(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3088(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3280(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3216(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3408(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3344(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3536(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3472(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3664(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3600(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3792(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3728(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3920(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3856(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 4048(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3984(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 96(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 32(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 224(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 160(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 352(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 288(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 480(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 416(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 608(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 544(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 736(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 672(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 864(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 800(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 992(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 928(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1120(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1056(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1248(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1184(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1376(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1312(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1504(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1440(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1632(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1568(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1760(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1696(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1888(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1824(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2016(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1952(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2144(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2080(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2272(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2208(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2400(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2336(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2528(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2464(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2656(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2592(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2784(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2720(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2912(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2848(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3040(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2976(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3168(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3104(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3296(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3232(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3424(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3360(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3552(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3488(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3680(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3616(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3808(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3744(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 3936(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3872(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 4064(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 4000(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 112(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 48(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 240(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 176(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 368(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 304(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 496(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 432(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 624(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 560(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 752(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 688(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 880(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 816(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1008(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 944(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1136(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1072(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1264(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1200(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1392(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1328(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1520(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1456(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1648(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1584(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1776(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1712(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 1904(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1840(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2032(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 1968(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2160(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2096(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2288(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2224(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2416(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2352(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2544(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2480(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2672(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2608(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2800(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2736(%rdi), %xmm1 |
| ; SSE-NEXT: movaps %xmm1, %xmm2 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: movaps 2928(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2864(%rdi), %xmm15 |
| ; SSE-NEXT: movaps %xmm15, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1] |
| ; SSE-NEXT: movaps 3056(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 2992(%rdi), %xmm13 |
| ; SSE-NEXT: movaps %xmm13, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm0[1] |
| ; SSE-NEXT: movaps 3184(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3120(%rdi), %xmm9 |
| ; SSE-NEXT: movaps %xmm9, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1] |
| ; SSE-NEXT: movaps 3312(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3248(%rdi), %xmm8 |
| ; SSE-NEXT: movaps %xmm8, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm0[1] |
| ; SSE-NEXT: movaps 3440(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3376(%rdi), %xmm12 |
| ; SSE-NEXT: movaps %xmm12, %xmm1 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1] |
| ; SSE-NEXT: movaps 3568(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3504(%rdi), %xmm10 |
| ; SSE-NEXT: movaps %xmm10, %xmm14 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm0[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm0[1] |
| ; SSE-NEXT: movaps 4016(%rdi), %xmm2 |
| ; SSE-NEXT: movaps 3952(%rdi), %xmm4 |
| ; SSE-NEXT: movaps 3696(%rdi), %xmm0 |
| ; SSE-NEXT: movaps 3632(%rdi), %xmm3 |
| ; SSE-NEXT: movaps %xmm3, %xmm11 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0] |
| ; SSE-NEXT: movaps 4080(%rdi), %xmm1 |
| ; SSE-NEXT: movaps 3888(%rdi), %xmm5 |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1] |
| ; SSE-NEXT: movaps 3824(%rdi), %xmm6 |
| ; SSE-NEXT: movaps 3760(%rdi), %xmm0 |
| ; SSE-NEXT: movaps %xmm7, 496(%rsi) |
| ; SSE-NEXT: movaps %xmm0, %xmm7 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm6[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm6, 480(%rsi) |
| ; SSE-NEXT: movaps %xmm5, %xmm6 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm4[1] |
| ; SSE-NEXT: movaps %xmm2, %xmm4 |
| ; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] |
| ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1] |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rsi) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 496(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 480(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rdx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 496(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 480(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rcx) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 496(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 480(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%r8) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 496(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 480(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%r9) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%r9) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 496(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 480(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 464(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 448(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 432(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm4, 496(%rax) |
| ; SSE-NEXT: movaps %xmm6, 480(%rax) |
| ; SSE-NEXT: movaps %xmm7, 464(%rax) |
| ; SSE-NEXT: movaps %xmm11, 448(%rax) |
| ; SSE-NEXT: movaps %xmm14, 432(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 416(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 400(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 384(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 368(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 352(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 336(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 320(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 304(%rax) |
| ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 288(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 272(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 256(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 240(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 224(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 208(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 192(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 176(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 160(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 144(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 128(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm1, (%rax) |
| ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE-NEXT: movaps %xmm2, 496(%rax) |
| ; SSE-NEXT: movaps %xmm5, 480(%rax) |
| ; SSE-NEXT: movaps %xmm0, 464(%rax) |
| ; SSE-NEXT: movaps %xmm3, 448(%rax) |
| ; SSE-NEXT: movaps %xmm10, 432(%rax) |
| ; SSE-NEXT: movaps %xmm12, 416(%rax) |
| ; SSE-NEXT: movaps %xmm8, 400(%rax) |
| ; SSE-NEXT: movaps %xmm9, 384(%rax) |
| ; SSE-NEXT: movaps %xmm13, 368(%rax) |
| ; SSE-NEXT: movaps %xmm15, 352(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 336(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 320(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 304(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 288(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 272(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 256(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 240(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 224(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 208(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 192(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 176(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 160(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 144(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 128(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 112(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 96(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 80(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 64(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 48(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 32(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, 16(%rax) |
| ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE-NEXT: movaps %xmm0, (%rax) |
| ; SSE-NEXT: addq $3720, %rsp # imm = 0xE88 |
| ; SSE-NEXT: retq |
| ; |
| ; AVX1-ONLY-LABEL: load_i64_stride8_vf64: |
| ; AVX1-ONLY: # %bb.0: |
| ; AVX1-ONLY-NEXT: subq $4968, %rsp # imm = 0x1368 |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm4[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2240(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovaps 2176(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm7[0],xmm6[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovaps 2112(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 2048(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm11[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm11[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm9[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm9[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 512(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm4[0],xmm8[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm4[1],xmm8[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1600(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1536(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2752(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2688(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2624(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2560(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3264(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3200(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3136(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3072(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3776(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3712(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3648(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3584(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2432(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2368(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2304(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3008(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2944(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2880(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2816(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3520(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3456(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3392(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3328(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 4032(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3968(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3904(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3840(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 832(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1472(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1344(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1280(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1792(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 192(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 80(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 528(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1104(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1040(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1616(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1552(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2240(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2176(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2128(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2064(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2752(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2688(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2640(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2576(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3264(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3200(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3152(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3088(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3776(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3712(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3664(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3600(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 4032(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3968(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3920(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3856(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3520(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3456(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3408(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3344(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3008(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2944(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2896(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2832(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2432(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2384(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2320(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1872(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovaps 1808(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm13[0],xmm14[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1472(%rdi), %ymm12 |
| ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %ymm11 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm11[0],ymm12[0],ymm11[2],ymm12[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1360(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovaps 1296(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm9[0],xmm10[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %ymm8 |
| ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %ymm7 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[2],ymm8[2] |
| ; AVX1-ONLY-NEXT: vmovaps 848(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm5[0],xmm6[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %ymm4 |
| ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm3 |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps 272(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm1[0],xmm2[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm15 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm3[1],ymm4[1],ymm3[3],ymm4[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm8[1],ymm7[3],ymm8[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm6[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm11[1],ymm12[1],ymm11[3],ymm12[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm9[1],xmm10[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm13[1],xmm14[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, (%rsp) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1120(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1312(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1504(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1440(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1632(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1568(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1760(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1888(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1824(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2144(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2080(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2272(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2208(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2400(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2336(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2528(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2464(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2656(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2592(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2784(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2720(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2912(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2848(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3040(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 2976(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3168(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3104(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3296(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3232(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3424(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3360(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3552(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3488(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3680(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3616(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3808(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3744(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3936(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 3872(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 4064(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps 4000(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 224(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 160(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 48(%rdi), %xmm2 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 480(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 368(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm3 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 624(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 560(%rdi), %xmm1 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 880(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm3 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm5 = xmm3[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1136(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1072(%rdi), %xmm4 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm4[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm5[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1504(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1440(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1392(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1328(%rdi), %xmm5 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm7 = xmm5[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1760(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1648(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1584(%rdi), %xmm6 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm8 = xmm6[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm7[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 1904(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 1840(%rdi), %xmm7 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2272(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2208(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2160(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2096(%rdi), %xmm8 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm10 = xmm8[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2528(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2464(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2416(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2352(%rdi), %xmm9 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm11 = xmm9[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm10[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2784(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2720(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2672(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2608(%rdi), %xmm10 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm12 = xmm10[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3040(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2976(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 2928(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 2864(%rdi), %xmm11 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm13 = xmm11[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm12[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3296(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3232(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3184(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3120(%rdi), %xmm12 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm14 = xmm12[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm13[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3552(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3488(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3440(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3376(%rdi), %xmm13 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm15 = xmm13[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm14[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3808(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3744(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3696(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3632(%rdi), %xmm14 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm14[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 4064(%rdi), %ymm0 |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 4000(%rdi), %ymm1 |
| ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] |
| ; AVX1-ONLY-NEXT: vmovaps 3952(%rdi), %xmm0 |
| ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX1-ONLY-NEXT: vmovaps 3888(%rdi), %xmm15 |
| ; AVX1-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm15[0],xmm0[0] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm1[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm3[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm4[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm5[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm6[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm7[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm8[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm9[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm10[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm11[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm12[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm13[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm1 = xmm14[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX1-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm8 # 16-byte Folded Reload |
| ; AVX1-ONLY-NEXT: # xmm8 = xmm15[1],mem[1] |
| ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 496(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 480(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 432(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 416(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 368(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 352(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 32(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 288(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 224(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 160(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 96(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 48(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 304(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 240(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 176(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 112(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 464(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 448(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 256(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 384(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 320(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 192(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 128(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 64(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 272(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 400(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 336(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 208(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 144(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 80(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 16(%rsi) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 224(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 240(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 160(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 176(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 96(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 112(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 32(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 48(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 480(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 496(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 416(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 432(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 352(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 368(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 288(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 304(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 448(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 464(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 384(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 400(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 320(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 336(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 192(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 208(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 16(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 64(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 80(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 128(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 144(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 256(%rdx) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 272(%rdx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 288(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 352(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 416(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 480(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 448(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 384(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 320(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 256(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rcx) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 480(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 416(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 352(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 288(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 448(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 384(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 320(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 256(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%r8) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%r8) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 496(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 480(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 464(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 448(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 432(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 416(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 400(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 384(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 368(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 352(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 336(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 320(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 304(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 288(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 272(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 256(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 240(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 224(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 208(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 192(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 176(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 160(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 144(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 128(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 112(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 96(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 80(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 64(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 48(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 32(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 16(%r9) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%r9) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 496(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 480(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 464(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 448(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 432(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 416(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 400(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 384(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 368(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 352(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 336(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 320(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 304(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 288(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 272(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 256(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 240(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 208(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 176(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 144(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 112(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 80(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 48(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, 16(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %xmm8, (%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 480(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 448(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 416(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 384(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 352(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 320(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 288(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 256(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm8, (%rax) |
| ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm2, 416(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm3, 384(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm4, 352(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm5, 320(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm6, 288(%rax) |
| ; AVX1-ONLY-NEXT: vmovaps %ymm7, 256(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rax) |
| ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX1-ONLY-NEXT: addq $4968, %rsp # imm = 0x1368 |
| ; AVX1-ONLY-NEXT: vzeroupper |
| ; AVX1-ONLY-NEXT: retq |
| ; |
| ; AVX2-ONLY-LABEL: load_i64_stride8_vf64: |
| ; AVX2-ONLY: # %bb.0: |
| ; AVX2-ONLY-NEXT: subq $5560, %rsp # imm = 0x15B8 |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %xmm3 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %xmm5 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm5[0],xmm4[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2240(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vmovaps 2176(%rdi), %xmm8 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm6 = xmm8[0],xmm7[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm6 |
| ; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %xmm9 |
| ; AVX2-ONLY-NEXT: vmovaps 2112(%rdi), %xmm10 |
| ; AVX2-ONLY-NEXT: vmovaps 2048(%rdi), %xmm11 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm8[1],xmm7[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm7 = xmm11[0],xmm10[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm11[1],xmm10[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1024(%rdi), %xmm7 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm4 = xmm7[0],xmm9[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm7[1],xmm9[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %xmm4 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm4[0],xmm6[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm4[1],xmm6[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1728(%rdi), %xmm2 |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1664(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm2[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2752(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2688(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2624(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2560(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3264(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3200(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3136(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3072(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3776(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3712(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3648(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3584(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2496(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2432(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2368(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2304(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3008(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2944(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2880(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2816(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3520(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3456(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3392(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3328(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 4032(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3968(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3904(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3840(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1408(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1984(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1792(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 64(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1024(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1728(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1664(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2112(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2048(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2240(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2176(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2624(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2560(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2752(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2688(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3136(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3072(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3264(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3200(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3648(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3584(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3776(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3712(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3904(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3840(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 4032(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3968(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3392(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3328(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3520(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3456(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2880(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2816(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3008(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2944(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2368(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2304(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2496(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2432(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1792(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1984(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm11[0],ymm10[2],ymm11[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovaps 1408(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[2],ymm8[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm14[0],ymm9[0],ymm14[2],ymm9[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm13[0],ymm6[0],ymm13[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 448(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm2[0],ymm0[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm12[0],ymm3[0],ymm12[2],ymm3[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm12[1],ymm3[1],ymm12[3],ymm3[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm4[1],ymm5[1],ymm4[3],ymm5[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm13[1],ymm6[1],ymm13[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm8[1],ymm7[3],ymm8[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm14[1],ymm9[1],ymm14[3],ymm9[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm10[1],ymm11[1],ymm10[3],ymm11[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1312(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1504(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1632(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1568(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1888(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1824(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 1952(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2144(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2080(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2272(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2208(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2400(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2336(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2528(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2464(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2656(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2592(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2784(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2720(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2912(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2848(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3040(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 2976(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3168(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3104(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3296(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3232(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3424(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3360(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3552(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3488(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3680(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3616(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3808(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3744(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3936(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 3872(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 4064(%rdi), %xmm0 |
| ; AVX2-ONLY-NEXT: vmovaps 4000(%rdi), %xmm1 |
| ; AVX2-ONLY-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] |
| ; AVX2-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 352(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 928(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1312(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1504(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1632(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1568(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1888(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1824(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 1952(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2144(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2080(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2272(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2208(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2400(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2336(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2528(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2464(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2656(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2592(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2784(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2720(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2912(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2848(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3040(%rdi), %ymm0 |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 2976(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3168(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3104(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3296(%rdi), %ymm11 |
| ; AVX2-ONLY-NEXT: vmovaps 3232(%rdi), %ymm9 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm11[0],ymm9[2],ymm11[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm2[0],ymm1[0],ymm2[2],ymm1[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3424(%rdi), %ymm10 |
| ; AVX2-ONLY-NEXT: vmovaps 3360(%rdi), %ymm14 |
| ; AVX2-ONLY-NEXT: vmovaps 3552(%rdi), %ymm8 |
| ; AVX2-ONLY-NEXT: vmovaps 3488(%rdi), %ymm7 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[2],ymm8[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm14[0],ymm10[0],ymm14[2],ymm10[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3680(%rdi), %ymm6 |
| ; AVX2-ONLY-NEXT: vmovaps 3616(%rdi), %ymm13 |
| ; AVX2-ONLY-NEXT: vmovaps 3808(%rdi), %ymm5 |
| ; AVX2-ONLY-NEXT: vmovaps 3744(%rdi), %ymm3 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm5[0],ymm3[2],ymm5[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm13[0],ymm6[0],ymm13[2],ymm6[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovaps 3936(%rdi), %ymm4 |
| ; AVX2-ONLY-NEXT: vmovaps 3872(%rdi), %ymm12 |
| ; AVX2-ONLY-NEXT: vmovaps 4064(%rdi), %ymm2 |
| ; AVX2-ONLY-NEXT: vmovaps 4000(%rdi), %ymm1 |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm2[0],ymm1[2],ymm2[2] |
| ; AVX2-ONLY-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm12[0],ymm4[0],ymm12[2],ymm4[2] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm15 = ymm15[1],mem[1],ymm15[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm15[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm9[1],ymm11[1],ymm9[3],ymm11[3] |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload |
| ; AVX2-ONLY-NEXT: # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm8[1],ymm7[3],ymm8[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm14[1],ymm10[1],ymm14[3],ymm10[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm3[1],ymm5[1],ymm3[3],ymm5[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm13[1],ymm6[1],ymm13[3],ymm6[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm2[1],ymm1[3],ymm2[3] |
| ; AVX2-ONLY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm12[1],ymm4[1],ymm12[3],ymm4[3] |
| ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3] |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 496(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 480(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 432(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 416(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 368(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 352(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 288(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 304(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 464(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 448(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 256(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 384(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 320(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 272(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 400(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 336(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rsi) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 480(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 496(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 416(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 432(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 352(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 368(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 288(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 304(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 448(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 464(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 384(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 400(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 320(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 336(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 256(%rdx) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 272(%rdx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%r8) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r8) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 496(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 480(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 464(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 448(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 432(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 416(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 400(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 384(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 368(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 352(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 336(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 320(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 304(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 288(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 272(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 256(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%r9) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%r9) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 496(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 480(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 464(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 448(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 432(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 416(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 400(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 384(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 368(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 352(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 336(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 320(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 304(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 288(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 272(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 256(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 240(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 208(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 176(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 144(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 112(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 80(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 48(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, 16(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %xmm1, (%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rax) |
| ; AVX2-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 480(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm3, 448(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm7, 416(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm9, 384(%rax) |
| ; AVX2-ONLY-NEXT: vmovaps %ymm15, 352(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 320(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 288(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 256(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 224(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 192(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 160(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 128(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rax) |
| ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rax) |
| ; AVX2-ONLY-NEXT: addq $5560, %rsp # imm = 0x15B8 |
| ; AVX2-ONLY-NEXT: vzeroupper |
| ; AVX2-ONLY-NEXT: retq |
| ; |
| ; AVX512F-ONLY-SLOW-LABEL: load_i64_stride8_vf64: |
| ; AVX512F-ONLY-SLOW: # %bb.0: |
| ; AVX512F-ONLY-SLOW-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: movb $-64, %al |
| ; AVX512F-ONLY-SLOW-NEXT: kmovw %eax, %k1 |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512F-ONLY-SLOW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512F-ONLY-SLOW-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512F-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-SLOW-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512F-ONLY-SLOW-NEXT: vzeroupper |
| ; AVX512F-ONLY-SLOW-NEXT: retq |
| ; |
| ; AVX512F-ONLY-FAST-LABEL: load_i64_stride8_vf64: |
| ; AVX512F-ONLY-FAST: # %bb.0: |
| ; AVX512F-ONLY-FAST-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: movb $-64, %al |
| ; AVX512F-ONLY-FAST-NEXT: kmovw %eax, %k1 |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
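| ; Output lane 3: the repeated [3,11] index feeds vpermt2q, which reads qwords 0-7 from its destination table and 8-15 from its source, so [3,11,...] interleaves element 3 of both inputs. |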
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
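| ; Output lane 4: the same gather with a [4,12] index; the 256-bit [4,12] variant handles the ymm halves that vpblendd merges back in. |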
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512F-ONLY-FAST-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
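| ; Output lane 5: same structure with [5,13] zmm and ymm index vectors. |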
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512F-ONLY-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
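| ; Output lane 6: [6,14] indices; here the even-qword halves come from zmm-level vpunpcklqdq merged directly under %k1. |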
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512F-ONLY-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
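| ; Remaining lanes 7, 0 and 1 are computed together: [7,15] gathers pair with vpunpckhqdq {%k1} merges, while the [0,8] and [1,9] results are spilled for assembly further down. |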
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512F-ONLY-FAST-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
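| ; 256-bit [7,15] index for the ymm halves of lane 7. |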
| ; AVX512F-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512F-ONLY-FAST-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
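| ; Lane 0 low halves are rebuilt straight from memory: xmm vpunpcklqdq pairs take qword 0 of consecutive 64-byte groups, and vinserti32x4/vinserti64x4 stack them onto the k1-merged [0,8] results. |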
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512F-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512F-ONLY-FAST-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512F-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512F-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512F-ONLY-FAST-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512F-ONLY-FAST-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512F-ONLY-FAST-NEXT: vzeroupper |
| ; AVX512F-ONLY-FAST-NEXT: retq |
| ; |
| ; AVX512DQ-SLOW-LABEL: load_i64_stride8_vf64: |
| ; AVX512DQ-SLOW: # %bb.0: |
| ; AVX512DQ-SLOW-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: movb $-64, %al |
| ; AVX512DQ-SLOW-NEXT: kmovw %eax, %k1 |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512DQ-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512DQ-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512DQ-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512DQ-SLOW-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512DQ-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512DQ-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512DQ-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512DQ-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512DQ-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512DQ-SLOW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512DQ-SLOW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vbroadcasti64x2 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512DQ-SLOW-NEXT: # ymm16 = mem[0,1,0,1] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512DQ-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512DQ-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512DQ-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512DQ-SLOW-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512DQ-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512DQ-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512DQ-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512DQ-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-SLOW-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-SLOW-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQ-SLOW-NEXT: vzeroupper |
| ; AVX512DQ-SLOW-NEXT: retq |
| ; |
| ; AVX512DQ-FAST-LABEL: load_i64_stride8_vf64: |
| ; AVX512DQ-FAST: # %bb.0: |
| ; AVX512DQ-FAST-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: movb $-64, %al |
| ; AVX512DQ-FAST-NEXT: kmovw %eax, %k1 |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512DQ-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512DQ-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512DQ-FAST-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512DQ-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512DQ-FAST-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512DQ-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512DQ-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512DQ-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512DQ-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512DQ-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512DQ-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512DQ-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512DQ-FAST-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512DQ-FAST-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vbroadcasti64x2 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512DQ-FAST-NEXT: # ymm16 = mem[0,1,0,1] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512DQ-FAST-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512DQ-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512DQ-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512DQ-FAST-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512DQ-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512DQ-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512DQ-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512DQ-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQ-FAST-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQ-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQ-FAST-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQ-FAST-NEXT: vzeroupper |
| ; AVX512DQ-FAST-NEXT: retq |
| ; |
| ; AVX512BW-ONLY-SLOW-LABEL: load_i64_stride8_vf64: |
| ; AVX512BW-ONLY-SLOW: # %bb.0: |
| ; AVX512BW-ONLY-SLOW-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: movb $-64, %al |
| ; AVX512BW-ONLY-SLOW-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512BW-ONLY-SLOW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512BW-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-SLOW-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512BW-ONLY-SLOW-NEXT: vzeroupper |
| ; AVX512BW-ONLY-SLOW-NEXT: retq |
| ; |
| ; AVX512BW-ONLY-FAST-LABEL: load_i64_stride8_vf64: |
| ; AVX512BW-ONLY-FAST: # %bb.0: |
| ; AVX512BW-ONLY-FAST-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: movb $-64, %al |
| ; AVX512BW-ONLY-FAST-NEXT: kmovd %eax, %k1 |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512BW-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512BW-ONLY-FAST-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm16 = mem[0,1,2,3,0,1,2,3] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512BW-ONLY-FAST-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512BW-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512BW-ONLY-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512BW-ONLY-FAST-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512BW-ONLY-FAST-NEXT: vzeroupper |
| ; AVX512BW-ONLY-FAST-NEXT: retq |
| ; |
| ; AVX512DQBW-SLOW-LABEL: load_i64_stride8_vf64: |
| ; AVX512DQBW-SLOW: # %bb.0: |
| ; AVX512DQBW-SLOW-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: movb $-64, %al |
| ; AVX512DQBW-SLOW-NEXT: kmovd %eax, %k1 |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512DQBW-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512DQBW-SLOW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512DQBW-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512DQBW-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512DQBW-SLOW-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512DQBW-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512DQBW-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512DQBW-SLOW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512DQBW-SLOW-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512DQBW-SLOW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512DQBW-SLOW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512DQBW-SLOW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vbroadcasti64x2 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512DQBW-SLOW-NEXT: # ymm16 = mem[0,1,0,1] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512DQBW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512DQBW-SLOW-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512DQBW-SLOW-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512DQBW-SLOW-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512DQBW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-SLOW-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-SLOW-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-SLOW-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQBW-SLOW-NEXT: vzeroupper |
| ; AVX512DQBW-SLOW-NEXT: retq |
| ; |
| ; AVX512DQBW-FAST-LABEL: load_i64_stride8_vf64: |
| ; AVX512DQBW-FAST: # %bb.0: |
| ; AVX512DQBW-FAST-NEXT: subq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3392(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3328(%rdi), %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3520(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3456(%rdi), %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1856(%rdi), %zmm11 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1984(%rdi), %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 832(%rdi), %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 768(%rdi), %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 960(%rdi), %zmm24 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 896(%rdi), %zmm10 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 320(%rdi), %zmm12 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 256(%rdi), %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 448(%rdi), %zmm8 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 384(%rdi), %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: movb $-64, %al |
| ; AVX512DQBW-FAST-NEXT: kmovd %eax, %k1 |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10] |
| ; AVX512DQBW-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3264(%rdi), %ymm21 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3200(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm21[0],ymm0[2],ymm21[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3136(%rdi), %ymm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3072(%rdi), %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm3, (%rsp) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm7, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm6, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 704(%rdi), %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 640(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 576(%rdi), %ymm25 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 512(%rdi), %ymm23 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm25[0],ymm23[2],ymm25[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm14, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm5, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm12, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 192(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 128(%rdi), %ymm31 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm31[0],ymm0[0],ymm31[2],ymm0[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 64(%rdi), %ymm20 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 (%rdi), %ymm19 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm20[0],ymm19[2],ymm20[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1920(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1792(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm11, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1728(%rdi), %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1664(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1600(%rdi), %ymm18 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1536(%rdi), %ymm26 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm26[0],ymm18[0],ymm26[2],ymm18[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1472(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1408(%rdi), %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1344(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1280(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1216(%rdi), %ymm28 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1152(%rdi), %ymm29 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm28[0],ymm29[2],ymm28[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1088(%rdi), %ymm30 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1024(%rdi), %ymm27 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm27[0],ymm30[0],ymm27[2],ymm30[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3008(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2944(%rdi), %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2880(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2816(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2752(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2688(%rdi), %ymm11 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2624(%rdi), %ymm16 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2560(%rdi), %ymm9 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm9[0],ymm16[0],ymm9[2],ymm16[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2496(%rdi), %zmm10 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2432(%rdi), %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm7, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2368(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2304(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm3, %zmm2, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2240(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2176(%rdi), %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2112(%rdi), %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2048(%rdi), %ymm8 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 4032(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3968(%rdi), %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm6, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3904(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3840(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm1, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3776(%rdi), %ymm22 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3712(%rdi), %ymm17 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm15 = ymm17[0],ymm22[0],ymm17[2],ymm22[2] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3648(%rdi), %ymm12 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3584(%rdi), %ymm0 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} ymm14 = ymm0[0],ymm12[0],ymm0[2],ymm12[2] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm14, %zmm2, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11] |
| ; AVX512DQBW-FAST-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm4, %zmm2, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm14, %zmm15 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm14 = ymm13[1],ymm21[1],ymm13[3],ymm21[3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu (%rsp), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm15, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm13 = ymm13[1],mem[1],ymm13[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm25[1],ymm23[3],ymm25[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm25, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm2, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm23, %zmm2, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm5 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm31, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm13 = ymm31[1],mem[1],ymm31[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm19[1],ymm20[1],ymm19[3],ymm20[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm5, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm20, %zmm2, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm19, %zmm2, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm13 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm26[1],ymm18[1],ymm26[3],ymm18[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm18, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm26, %zmm2, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm13 = ymm29[1],ymm28[1],ymm29[3],ymm28[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm30[1],ymm27[3],ymm30[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm13, %zmm14, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm27, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm2, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm30, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm29, %zmm2, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm14 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm11 = ymm11[1],mem[1],ymm11[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm11[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm14, %zmm9 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm2, %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm16, %zmm10 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm7, %zmm10 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm7 = ymm5[1],mem[1],ymm5[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm5 = ymm8[1],mem[1],ymm8[3],mem[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm10, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm14, %zmm2, %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm3, %zmm8, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm6, %zmm2 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm3 = ymm17[1],ymm22[1],ymm17[3],ymm22[3] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm12[1],ymm0[3],ymm12[3] |
| ; AVX512DQBW-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm2, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12] |
| ; AVX512DQBW-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, %zmm11 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm22[0],zmm9[0],zmm22[2],zmm9[2],zmm22[4],zmm9[4],zmm22[6],zmm9[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3136(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3072(%rdi), %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm15, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm4, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3264(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3200(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12] |
| ; AVX512DQBW-FAST-NEXT: # ymm6 = mem[0,1,0,1] |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm2, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 576(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 512(%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 704(%rdi), %zmm21 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 640(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm21, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm25, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 (%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 192(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 128(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm19[0],zmm1[2],zmm19[2],zmm1[4],zmm19[4],zmm1[6],zmm19[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1600(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1536(%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1728(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1664(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm18, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm26, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm1[0],zmm10[0],zmm1[2],zmm10[2],zmm1[4],zmm10[4],zmm1[6],zmm10[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1088(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1024(%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1216(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1152(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm27, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2624(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2560(%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2752(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2688(%rdi), %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2112(%rdi), %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2048(%rdi), %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2240(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2176(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm6, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm14, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3648(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3584(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3776(%rdi), %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3712(%rdi), %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm3, %zmm1, %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm1 = zmm8[0],mem[0],zmm8[2],mem[2],zmm8[4],mem[4],zmm8[6],mem[6] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13] |
| ; AVX512DQBW-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13] |
| ; AVX512DQBW-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm22[1],zmm9[1],zmm22[3],zmm9[3],zmm22[5],zmm9[5],zmm22[7],zmm9[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm13, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm23, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm18, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm17, %zmm19 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm17[1],zmm19[1],zmm17[3],zmm19[3],zmm17[5],zmm19[5],zmm17[7],zmm19[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm14, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm4 = zmm12[1],mem[1],zmm12[3],mem[3],zmm12[5],mem[5],zmm12[7],mem[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, %zmm27 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm10[1],zmm4[3],zmm10[3],zmm4[5],zmm10[5],zmm4[7],zmm10[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm7, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm8, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm26, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm5, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm4 = zmm5[1],zmm30[1],zmm5[3],zmm30[3],zmm5[5],zmm30[5],zmm5[7],zmm30[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm4 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 (%rsp), %zmm6 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm1 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14] |
| ; AVX512DQBW-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm25, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14] |
| ; AVX512DQBW-FAST-NEXT: # ymm1 = mem[0,1,0,1] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm24, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm22, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 {%k1} # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # zmm3 {%k1} = zmm22[0],mem[0],zmm22[2],mem[2],zmm22[4],mem[4],zmm22[6],mem[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm20, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm21, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm17, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm19, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm16, %zmm0, %zmm14 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm18 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm20[0],zmm14[0],zmm20[2],zmm14[2],zmm20[4],zmm14[4],zmm20[6],zmm14[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm13, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm25[0],zmm26[2],zmm25[2],zmm26[4],zmm25[4],zmm26[6],zmm25[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm11, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm29, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm27, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm24[0],zmm29[0],zmm24[2],zmm29[2],zmm24[4],zmm29[4],zmm24[6],zmm29[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm9, %zmm0, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm1, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm9, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm22[0],zmm19[0],zmm22[2],zmm19[2],zmm22[4],zmm19[4],zmm22[6],zmm19[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm3, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm7, %zmm1, %zmm8 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm0, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm5, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm31, %zmm6, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm30, %zmm3, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm1[0],zmm23[0],zmm1[2],zmm23[2],zmm1[4],zmm23[4],zmm1[6],zmm23[6] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm0, %zmm4, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm16 = [7,15,7,15,7,15,7,15] |
| ; AVX512DQBW-FAST-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm2, %zmm16, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm4[1],zmm1[1],zmm4[3],zmm1[3],zmm4[5],zmm1[5],zmm4[7],zmm1[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8] |
| ; AVX512DQBW-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm3, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm13 = [1,9,1,9,1,9,1,9] |
| ; AVX512DQBW-FAST-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm1, %zmm13, %zmm6 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm2, %zmm3, %zmm30 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm2, %zmm13, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm18, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm18, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm16, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm20[1],zmm14[1],zmm20[3],zmm14[3],zmm20[5],zmm14[5],zmm20[7],zmm14[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm20, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm14, %zmm3, %zmm20 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm14, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm3, %zmm18 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm28, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm28 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm16, %zmm12 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm26[1],zmm25[1],zmm26[3],zmm25[3],zmm26[5],zmm25[5],zmm26[7],zmm25[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm26, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm25, %zmm3, %zmm26 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm25, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm3, %zmm28 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm15, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm25 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm27, %zmm16, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm24, %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm29[1],zmm24[3],zmm29[3],zmm24[5],zmm29[5],zmm24[7],zmm29[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm29, %zmm3, %zmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm29, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm27, %zmm3, %zmm25 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm27, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm9, %zmm27 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm9, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm16, %zmm9 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm22[1],zmm19[1],zmm22[3],zmm19[3],zmm22[5],zmm19[5],zmm22[7],zmm19[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm22, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm19, %zmm3, %zmm22 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm19, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm3, %zmm27 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm10, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm29 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm31, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm16, %zmm31 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm31 {%k1} = zmm21[1],zmm17[1],zmm21[3],zmm17[3],zmm21[5],zmm17[5],zmm21[7],zmm17[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm19 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm17, %zmm3, %zmm19 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm17, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm29 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm3, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm22 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm9, %zmm3, %zmm22 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm24, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm23, %zmm3, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm8, %zmm13, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm17 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm21, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm9, %zmm13, %zmm17 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm24, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm23, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpermi2q %zmm26, %zmm15, %zmm13 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm26, %zmm16, %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm24[1],zmm23[1],zmm24[3],zmm23[3],zmm24[5],zmm23[5],zmm24[7],zmm23[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm15, %zmm21 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vpermt2q %zmm9, %zmm16, %zmm15 |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} zmm15 {%k1} = zmm10[1],zmm8[1],zmm10[3],zmm8[3],zmm10[5],zmm8[5],zmm10[7],zmm8[7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm15, %zmm23 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm12 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm5 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm6 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm7 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm8 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm10 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm14 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vbroadcasti64x2 {{.*#+}} ymm16 = [7,15,7,15] |
| ; AVX512DQBW-FAST-NEXT: # ymm16 = mem[0,1,0,1] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm15 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm0 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm1 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm2 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm4 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm11 # 64-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm12[0,1,2,3],ymm15[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm12, %zmm12 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm9[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm5, %zmm5 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm1[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm2[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm0, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3],ymm4[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm31, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm21, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm14[0,1,2,3],ymm11[4,5,6,7] |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm15, %zmm23, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm20, %zmm18 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 192(%rdi), %xmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 128(%rdi), %xmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm1[0],xmm0[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa (%rdi), %xmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 64(%rdi), %xmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm16 = xmm0[0],xmm1[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm16, %ymm2 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm18, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm30 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 704(%rdi), %xmm8 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 640(%rdi), %xmm20 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm20[0],xmm8[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 576(%rdi), %xmm18 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 512(%rdi), %xmm16 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm16[0],xmm18[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm31, %ymm2 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm30, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1216(%rdi), %xmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1152(%rdi), %xmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm31 = xmm1[0],xmm0[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1088(%rdi), %xmm23 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1024(%rdi), %xmm14 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm30 = xmm14[0],xmm23[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm31, %ymm30, %ymm30 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm30, %zmm25, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm28 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1728(%rdi), %xmm30 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 1664(%rdi), %xmm31 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm31[0],xmm30[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1600(%rdi), %xmm12 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 1536(%rdi), %xmm11 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm11[0],xmm12[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm28, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm19, %zmm29 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2240(%rdi), %xmm19 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2176(%rdi), %xmm21 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm21[0],xmm19[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2112(%rdi), %xmm15 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2048(%rdi), %xmm1 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm28 = xmm1[0],xmm15[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm25, %ymm28, %ymm25 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm25, %zmm29, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2752(%rdi), %xmm28 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 2688(%rdi), %xmm29 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm29[0],xmm28[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2624(%rdi), %xmm10 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 2560(%rdi), %xmm9 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm25 = xmm9[0],xmm10[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm2, %ymm25, %ymm2 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm27, %zmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm22 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3264(%rdi), %xmm25 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3200(%rdi), %xmm27 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm27[0],xmm25[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3136(%rdi), %xmm0 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3072(%rdi), %xmm2 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm26 = xmm2[0],xmm0[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm26, %ymm4 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm22, %zmm22 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, %zmm3 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3776(%rdi), %xmm7 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 3712(%rdi), %xmm26 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm26[0],xmm7[0] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3648(%rdi), %xmm6 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa 3584(%rdi), %xmm5 |
| ; AVX512DQBW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm24 = xmm5[0],xmm6[0] |
| ; AVX512DQBW-FAST-NEXT: vinserti32x4 $1, %xmm4, %ymm24, %ymm4 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm3, %zmm24 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm17 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm27[1],xmm25[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm2, %zmm17, %zmm2 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm20[1],xmm8[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm16[1],xmm18[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm3, %zmm0, %zmm3 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # xmm4 = xmm4[1],mem[1] |
| ; AVX512DQBW-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # xmm8 = xmm8[1],mem[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm4, %ymm8, %ymm4 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm4, %zmm0, %zmm4 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm8, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm31[1],xmm30[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm8, %ymm11, %ymm8 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm8, %zmm0, %zmm8 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm11, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload |
| ; AVX512DQBW-FAST-NEXT: # xmm11 = xmm11[1],mem[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm14[1],xmm23[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm11, %ymm12, %ymm11 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm11, %zmm0, %zmm11 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm12, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm29[1],xmm28[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm10[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm12, %ymm9, %ymm9 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm9 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, %zmm0 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm21[1],xmm19[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm1[1],xmm15[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm10, %ymm12, %ymm10 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm10, %zmm0, %zmm10 |
| ; AVX512DQBW-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, %zmm13 {%k1} |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm26[1],xmm7[1] |
| ; AVX512DQBW-FAST-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1] |
| ; AVX512DQBW-FAST-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5 |
| ; AVX512DQBW-FAST-NEXT: vinserti64x4 $0, %ymm5, %zmm13, %zmm1 |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm24, 448(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm22, 384(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%rsi) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, 448(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm10, 256(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm9, 320(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm11, 128(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm8, 192(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm4, (%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm3, 64(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, 384(%rdx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%rcx) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%r8) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%r9) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%r9) |
| ; AVX512DQBW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512DQBW-FAST-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 384(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 448(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 256(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 320(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 128(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 192(%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, (%rax) |
| ; AVX512DQBW-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; AVX512DQBW-FAST-NEXT: vmovaps %zmm0, 64(%rax) |
| ; AVX512DQBW-FAST-NEXT: addq $6728, %rsp # imm = 0x1A48 |
| ; AVX512DQBW-FAST-NEXT: vzeroupper |
| ; AVX512DQBW-FAST-NEXT: retq |
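| ; Reference IR below: a stride-8 deinterleave of one <512 x i64> wide load. |
| ; Each %strided.vecN gathers element N of every consecutive 8-element group |
| ; (indices N, N+8, N+16, ...), producing eight <64 x i64> results that are |
| ; stored to the eight output pointers. |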
| %wide.vec = load <512 x i64>, ptr %in.vec, align 64 |
| %strided.vec0 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120, i32 128, i32 136, i32 144, i32 152, i32 160, i32 168, i32 176, i32 184, i32 192, i32 200, i32 208, i32 216, i32 224, i32 232, i32 240, i32 248, i32 256, i32 264, i32 272, i32 280, i32 288, i32 296, i32 304, i32 312, i32 320, i32 328, i32 336, i32 344, i32 352, i32 360, i32 368, i32 376, i32 384, i32 392, i32 400, i32 408, i32 416, i32 424, i32 432, i32 440, i32 448, i32 456, i32 464, i32 472, i32 480, i32 488, i32 496, i32 504> |
| %strided.vec1 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121, i32 129, i32 137, i32 145, i32 153, i32 161, i32 169, i32 177, i32 185, i32 193, i32 201, i32 209, i32 217, i32 225, i32 233, i32 241, i32 249, i32 257, i32 265, i32 273, i32 281, i32 289, i32 297, i32 305, i32 313, i32 321, i32 329, i32 337, i32 345, i32 353, i32 361, i32 369, i32 377, i32 385, i32 393, i32 401, i32 409, i32 417, i32 425, i32 433, i32 441, i32 449, i32 457, i32 465, i32 473, i32 481, i32 489, i32 497, i32 505> |
| %strided.vec2 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58, i32 66, i32 74, i32 82, i32 90, i32 98, i32 106, i32 114, i32 122, i32 130, i32 138, i32 146, i32 154, i32 162, i32 170, i32 178, i32 186, i32 194, i32 202, i32 210, i32 218, i32 226, i32 234, i32 242, i32 250, i32 258, i32 266, i32 274, i32 282, i32 290, i32 298, i32 306, i32 314, i32 322, i32 330, i32 338, i32 346, i32 354, i32 362, i32 370, i32 378, i32 386, i32 394, i32 402, i32 410, i32 418, i32 426, i32 434, i32 442, i32 450, i32 458, i32 466, i32 474, i32 482, i32 490, i32 498, i32 506> |
| %strided.vec3 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59, i32 67, i32 75, i32 83, i32 91, i32 99, i32 107, i32 115, i32 123, i32 131, i32 139, i32 147, i32 155, i32 163, i32 171, i32 179, i32 187, i32 195, i32 203, i32 211, i32 219, i32 227, i32 235, i32 243, i32 251, i32 259, i32 267, i32 275, i32 283, i32 291, i32 299, i32 307, i32 315, i32 323, i32 331, i32 339, i32 347, i32 355, i32 363, i32 371, i32 379, i32 387, i32 395, i32 403, i32 411, i32 419, i32 427, i32 435, i32 443, i32 451, i32 459, i32 467, i32 475, i32 483, i32 491, i32 499, i32 507> |
| %strided.vec4 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60, i32 68, i32 76, i32 84, i32 92, i32 100, i32 108, i32 116, i32 124, i32 132, i32 140, i32 148, i32 156, i32 164, i32 172, i32 180, i32 188, i32 196, i32 204, i32 212, i32 220, i32 228, i32 236, i32 244, i32 252, i32 260, i32 268, i32 276, i32 284, i32 292, i32 300, i32 308, i32 316, i32 324, i32 332, i32 340, i32 348, i32 356, i32 364, i32 372, i32 380, i32 388, i32 396, i32 404, i32 412, i32 420, i32 428, i32 436, i32 444, i32 452, i32 460, i32 468, i32 476, i32 484, i32 492, i32 500, i32 508> |
| %strided.vec5 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125, i32 133, i32 141, i32 149, i32 157, i32 165, i32 173, i32 181, i32 189, i32 197, i32 205, i32 213, i32 221, i32 229, i32 237, i32 245, i32 253, i32 261, i32 269, i32 277, i32 285, i32 293, i32 301, i32 309, i32 317, i32 325, i32 333, i32 341, i32 349, i32 357, i32 365, i32 373, i32 381, i32 389, i32 397, i32 405, i32 413, i32 421, i32 429, i32 437, i32 445, i32 453, i32 461, i32 469, i32 477, i32 485, i32 493, i32 501, i32 509> |
| %strided.vec6 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126, i32 134, i32 142, i32 150, i32 158, i32 166, i32 174, i32 182, i32 190, i32 198, i32 206, i32 214, i32 222, i32 230, i32 238, i32 246, i32 254, i32 262, i32 270, i32 278, i32 286, i32 294, i32 302, i32 310, i32 318, i32 326, i32 334, i32 342, i32 350, i32 358, i32 366, i32 374, i32 382, i32 390, i32 398, i32 406, i32 414, i32 422, i32 430, i32 438, i32 446, i32 454, i32 462, i32 470, i32 478, i32 486, i32 494, i32 502, i32 510> |
| %strided.vec7 = shufflevector <512 x i64> %wide.vec, <512 x i64> poison, <64 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127, i32 135, i32 143, i32 151, i32 159, i32 167, i32 175, i32 183, i32 191, i32 199, i32 207, i32 215, i32 223, i32 231, i32 239, i32 247, i32 255, i32 263, i32 271, i32 279, i32 287, i32 295, i32 303, i32 311, i32 319, i32 327, i32 335, i32 343, i32 351, i32 359, i32 367, i32 375, i32 383, i32 391, i32 399, i32 407, i32 415, i32 423, i32 431, i32 439, i32 447, i32 455, i32 463, i32 471, i32 479, i32 487, i32 495, i32 503, i32 511> |
| store <64 x i64> %strided.vec0, ptr %out.vec0, align 64 |
| store <64 x i64> %strided.vec1, ptr %out.vec1, align 64 |
| store <64 x i64> %strided.vec2, ptr %out.vec2, align 64 |
| store <64 x i64> %strided.vec3, ptr %out.vec3, align 64 |
| store <64 x i64> %strided.vec4, ptr %out.vec4, align 64 |
| store <64 x i64> %strided.vec5, ptr %out.vec5, align 64 |
| store <64 x i64> %strided.vec6, ptr %out.vec6, align 64 |
| store <64 x i64> %strided.vec7, ptr %out.vec7, align 64 |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX: {{.*}} |
| ; AVX1: {{.*}} |
| ; AVX2: {{.*}} |
| ; AVX2-FAST: {{.*}} |
| ; AVX2-FAST-PERLANE: {{.*}} |
| ; AVX2-SLOW: {{.*}} |
| ; AVX512BW-FAST: {{.*}} |
| ; AVX512BW-SLOW: {{.*}} |
| ; AVX512F-FAST: {{.*}} |
| ; AVX512F-SLOW: {{.*}} |
| ; FALLBACK0: {{.*}} |
| ; FALLBACK1: {{.*}} |
| ; FALLBACK10: {{.*}} |
| ; FALLBACK11: {{.*}} |
| ; FALLBACK12: {{.*}} |
| ; FALLBACK2: {{.*}} |
| ; FALLBACK3: {{.*}} |
| ; FALLBACK4: {{.*}} |
| ; FALLBACK5: {{.*}} |
| ; FALLBACK6: {{.*}} |
| ; FALLBACK7: {{.*}} |
| ; FALLBACK8: {{.*}} |
| ; FALLBACK9: {{.*}} |