; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
; These patterns are produced by the LoopVectorizer for interleaved loads.
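;
; A minimal sketch of the kind of source loop involved (hypothetical; the
; array names are illustrative and not part of this test). In C:
;
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[3*i + 0];  // stride-3 gather of the first element
;     out1[i] = in[3*i + 1];  // ... the second element
;     out2[i] = in[3*i + 2];  // ... the third element
;   }
;
; The vectorizer turns each group of iterations into one wide load of the
; interleaved data followed by shufflevectors that pick every third element,
; matching the IR bodies below.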
define void @load_i64_stride3_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: movapd 16(%rdi), %xmm1
; SSE-NEXT: movapd 32(%rdi), %xmm2
; SSE-NEXT: movapd %xmm1, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm2[0]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE-NEXT: movapd %xmm3, (%rsi)
; SSE-NEXT: movapd %xmm0, (%rdx)
; SSE-NEXT: movapd %xmm2, (%rcx)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf2:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rsi)
; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rcx)
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf2:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%rsi)
; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rcx)
; AVX2-ONLY-NEXT: retq
;
; AVX512F-SLOW-LABEL: load_i64_stride3_vf2:
; AVX512F-SLOW: # %bb.0:
; AVX512F-SLOW-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512F-SLOW-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512F-SLOW-NEXT: vmovaps %xmm0, (%rsi)
; AVX512F-SLOW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512F-SLOW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512F-SLOW-NEXT: vzeroupper
; AVX512F-SLOW-NEXT: retq
;
; AVX512F-FAST-LABEL: load_i64_stride3_vf2:
; AVX512F-FAST: # %bb.0:
; AVX512F-FAST-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512F-FAST-NEXT: vmovaps (%rdi), %zmm1
; AVX512F-FAST-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512F-FAST-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512F-FAST-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512F-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512F-FAST-NEXT: vmovaps %xmm1, (%rsi)
; AVX512F-FAST-NEXT: vmovaps %xmm0, (%rdx)
; AVX512F-FAST-NEXT: vmovaps %xmm2, (%rcx)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
;
; AVX512BW-SLOW-LABEL: load_i64_stride3_vf2:
; AVX512BW-SLOW: # %bb.0:
; AVX512BW-SLOW-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512BW-SLOW-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512BW-SLOW-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512BW-SLOW-NEXT: vmovaps %xmm0, (%rsi)
; AVX512BW-SLOW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512BW-SLOW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512BW-SLOW-NEXT: vzeroupper
; AVX512BW-SLOW-NEXT: retq
;
; AVX512BW-FAST-LABEL: load_i64_stride3_vf2:
; AVX512BW-FAST: # %bb.0:
; AVX512BW-FAST-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512BW-FAST-NEXT: vmovaps (%rdi), %zmm1
; AVX512BW-FAST-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512BW-FAST-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512BW-FAST-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512BW-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512BW-FAST-NEXT: vmovaps %xmm1, (%rsi)
; AVX512BW-FAST-NEXT: vmovaps %xmm0, (%rdx)
; AVX512BW-FAST-NEXT: vmovaps %xmm2, (%rcx)
; AVX512BW-FAST-NEXT: vzeroupper
; AVX512BW-FAST-NEXT: retq
%wide.vec = load <6 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 0, i32 3>
%strided.vec1 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 1, i32 4>
%strided.vec2 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 2, i32 5>
store <2 x i64> %strided.vec0, ptr %out.vec0, align 64
store <2 x i64> %strided.vec1, ptr %out.vec1, align 64
store <2 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
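; Vector factor 4: a <12 x i64> wide load split into three <4 x i64> strided results.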
define void @load_i64_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movapd 80(%rdi), %xmm0
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: movapd 16(%rdi), %xmm2
; SSE-NEXT: movapd 32(%rdi), %xmm3
; SSE-NEXT: movapd 48(%rdi), %xmm4
; SSE-NEXT: movapd 64(%rdi), %xmm5
; SSE-NEXT: movapd %xmm5, %xmm6
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
; SSE-NEXT: movapd %xmm2, %xmm7
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0]
; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm3[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT: movapd %xmm6, 16(%rsi)
; SSE-NEXT: movapd %xmm7, (%rsi)
; SSE-NEXT: movapd %xmm4, 16(%rdx)
; SSE-NEXT: movapd %xmm1, (%rdx)
; SSE-NEXT: movapd %xmm0, 16(%rcx)
; SSE-NEXT: movapd %xmm3, (%rcx)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf4:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm0[0],ymm2[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm2, (%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm0, (%rcx)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf4:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm1
; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,3,6,9]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,4,7,10]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,5,8,11]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <12 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
%strided.vec1 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
%strided.vec2 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
store <4 x i64> %strided.vec0, ptr %out.vec0, align 64
store <4 x i64> %strided.vec1, ptr %out.vec1, align 64
store <4 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
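; Vector factor 8: a <24 x i64> wide load split into three <8 x i64> strided results.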
define void @load_i64_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movapd 128(%rdi), %xmm2
; SSE-NEXT: movapd 176(%rdi), %xmm1
; SSE-NEXT: movapd 80(%rdi), %xmm0
; SSE-NEXT: movapd 96(%rdi), %xmm3
; SSE-NEXT: movapd 112(%rdi), %xmm8
; SSE-NEXT: movapd 144(%rdi), %xmm5
; SSE-NEXT: movapd 160(%rdi), %xmm9
; SSE-NEXT: movapd (%rdi), %xmm6
; SSE-NEXT: movapd 16(%rdi), %xmm10
; SSE-NEXT: movapd 32(%rdi), %xmm4
; SSE-NEXT: movapd 48(%rdi), %xmm7
; SSE-NEXT: movapd 64(%rdi), %xmm11
; SSE-NEXT: movapd %xmm11, %xmm12
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm7[0],xmm12[1]
; SSE-NEXT: movapd %xmm9, %xmm13
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm5[0],xmm13[1]
; SSE-NEXT: movapd %xmm8, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm3[0],xmm14[1]
; SSE-NEXT: movapd %xmm10, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm6[0],xmm15[1]
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm0[0]
; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm1[0]
; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm2[0]
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm4[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm8[0],xmm2[1]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm10[0],xmm4[1]
; SSE-NEXT: movapd %xmm14, 32(%rsi)
; SSE-NEXT: movapd %xmm13, 48(%rsi)
; SSE-NEXT: movapd %xmm15, (%rsi)
; SSE-NEXT: movapd %xmm12, 16(%rsi)
; SSE-NEXT: movapd %xmm3, 32(%rdx)
; SSE-NEXT: movapd %xmm5, 48(%rdx)
; SSE-NEXT: movapd %xmm6, (%rdx)
; SSE-NEXT: movapd %xmm7, 16(%rdx)
; SSE-NEXT: movapd %xmm2, 32(%rcx)
; SSE-NEXT: movapd %xmm1, 48(%rcx)
; SSE-NEXT: movapd %xmm4, (%rcx)
; SSE-NEXT: movapd %xmm0, 16(%rcx)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf8:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm2
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm1[2,3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm2, %ymm2
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm5
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2],ymm5[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1],ymm1[0],ymm3[3],ymm1[2]
; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm8
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[1],ymm0[0],ymm6[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm8
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3]
; AVX1-ONLY-NEXT: vmovapd %ymm7, 32(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm4, (%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm6, 32(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm0, 32(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm1, (%rcx)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf8:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm3
; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm5 = ymm3[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm5
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,3,6,9,12,15,u,u>
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,4,7,10,13,u,u,u>
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,13,0,3,6,u,u,u>
; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <24 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%strided.vec1 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%strided.vec2 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i64> %strided.vec0, ptr %out.vec0, align 64
store <8 x i64> %strided.vec1, ptr %out.vec1, align 64
store <8 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
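; Vector factor 16: a <48 x i64> wide load split into three <16 x i64> strided results.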
define void @load_i64_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf16:
; SSE: # %bb.0:
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movapd 272(%rdi), %xmm0
; SSE-NEXT: movapd 224(%rdi), %xmm2
; SSE-NEXT: movapd 368(%rdi), %xmm1
; SSE-NEXT: movapd 320(%rdi), %xmm3
; SSE-NEXT: movapd 128(%rdi), %xmm4
; SSE-NEXT: movapd 240(%rdi), %xmm5
; SSE-NEXT: movapd 256(%rdi), %xmm10
; SSE-NEXT: movapd 192(%rdi), %xmm6
; SSE-NEXT: movapd 208(%rdi), %xmm12
; SSE-NEXT: movapd 336(%rdi), %xmm7
; SSE-NEXT: movapd 352(%rdi), %xmm14
; SSE-NEXT: movapd 288(%rdi), %xmm11
; SSE-NEXT: movapd 304(%rdi), %xmm15
; SSE-NEXT: movapd 96(%rdi), %xmm9
; SSE-NEXT: movapd 112(%rdi), %xmm13
; SSE-NEXT: movapd %xmm15, %xmm8
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm11[0],xmm8[1]
; SSE-NEXT: movapd %xmm8, (%rsp) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm3[0]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm15[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm14, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm7[0],xmm15[1]
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm1[0]
; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm12, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm6[0],xmm14[1]
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm2[0]
; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm12[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm10, %xmm12
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm5[0],xmm12[1]
; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0]
; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm13, %xmm10
; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm9[0],xmm10[1]
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm4[0]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm13[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 144(%rdi), %xmm13
; SSE-NEXT: movapd 160(%rdi), %xmm1
; SSE-NEXT: movapd %xmm1, %xmm8
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm13[0],xmm8[1]
; SSE-NEXT: movapd 176(%rdi), %xmm6
; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm6[0]
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1]
; SSE-NEXT: movapd 48(%rdi), %xmm1
; SSE-NEXT: movapd 64(%rdi), %xmm4
; SSE-NEXT: movapd %xmm4, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSE-NEXT: movapd 80(%rdi), %xmm2
; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm2[0]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
; SSE-NEXT: movapd (%rdi), %xmm4
; SSE-NEXT: movapd 16(%rdi), %xmm7
; SSE-NEXT: movapd %xmm7, %xmm5
; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
; SSE-NEXT: movapd 32(%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
; SSE-NEXT: movapd %xmm12, 80(%rsi)
; SSE-NEXT: movapd %xmm3, 16(%rsi)
; SSE-NEXT: movapd %xmm14, 64(%rsi)
; SSE-NEXT: movapd %xmm5, (%rsi)
; SSE-NEXT: movapd %xmm15, 112(%rsi)
; SSE-NEXT: movapd %xmm8, 48(%rsi)
; SSE-NEXT: movaps (%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 96(%rsi)
; SSE-NEXT: movapd %xmm10, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 80(%rdx)
; SSE-NEXT: movapd %xmm1, 16(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 64(%rdx)
; SSE-NEXT: movapd %xmm4, (%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 112(%rdx)
; SSE-NEXT: movapd %xmm13, 48(%rdx)
; SSE-NEXT: movapd %xmm11, 96(%rdx)
; SSE-NEXT: movapd %xmm9, 32(%rdx)
; SSE-NEXT: movapd %xmm2, 16(%rcx)
; SSE-NEXT: movapd %xmm0, (%rcx)
; SSE-NEXT: movapd %xmm6, 48(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rcx)
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf16:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm3
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = mem[0,1],ymm3[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = mem[0,1],ymm2[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm1[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm8 = ymm4[1],ymm3[0],ymm4[3],ymm3[2]
; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm9
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm5[1],ymm2[0],ymm5[3],ymm2[2]
; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm10 = ymm7[1],ymm1[0],ymm7[3],ymm1[2]
; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm11
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm6[1],ymm0[0],ymm6[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3]
; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm12
; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm12, %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2],ymm12[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm12[0],ymm3[1],ymm12[2],ymm3[3]
; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm12
; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm12, %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2],ymm12[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3]
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm12
; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2],ymm12[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm12[0],ymm1[1],ymm12[2],ymm1[3]
; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm12
; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm12, %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm12[1],ymm6[2],ymm12[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
; AVX1-ONLY-NEXT: vmovapd %ymm7, (%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm6, 64(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm5, 96(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm4, 32(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm11, 64(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm10, (%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm9, 96(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm8, 32(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm0, 64(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm1, (%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm2, 96(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm3, 32(%rcx)
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf16:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm8
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm9
; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm6
; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm7
; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm5
; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm10
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm11
; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm11[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm2 = ymm10[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm2
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm4 = ymm7[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm12 = ymm9[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm11
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm9
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm7
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm7
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm9
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm10
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm11 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm11
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm12 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm2, 64(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm8, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm5, 96(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm11, 64(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm10, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm7, 32(%rcx)
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm4
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm5
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,3,6,9,12,15,u,u>
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm7
; AVX512-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = <1,4,7,10,13,u,u,u>
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9
; AVX512-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
; AVX512-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = <10,13,0,3,6,u,u,u>
; AVX512-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
; AVX512-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
; AVX512-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm6, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm8, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <48 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
%strided.vec1 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
%strided.vec2 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
store <16 x i64> %strided.vec0, ptr %out.vec0, align 64
store <16 x i64> %strided.vec1, ptr %out.vec1, align 64
store <16 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
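; Vector factor 32: a <96 x i64> wide load split into three <32 x i64> strided results.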
define void @load_i64_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf32:
; SSE: # %bb.0:
; SSE-NEXT: subq $408, %rsp # imm = 0x198
; SSE-NEXT: movapd 224(%rdi), %xmm0
; SSE-NEXT: movapd 272(%rdi), %xmm3
; SSE-NEXT: movapd 128(%rdi), %xmm1
; SSE-NEXT: movapd 176(%rdi), %xmm5
; SSE-NEXT: movapd 80(%rdi), %xmm4
; SSE-NEXT: movapd 96(%rdi), %xmm6
; SSE-NEXT: movapd 112(%rdi), %xmm11
; SSE-NEXT: movapd 144(%rdi), %xmm7
; SSE-NEXT: movapd 160(%rdi), %xmm12
; SSE-NEXT: movapd 192(%rdi), %xmm8
; SSE-NEXT: movapd 208(%rdi), %xmm13
; SSE-NEXT: movapd 240(%rdi), %xmm9
; SSE-NEXT: movapd 256(%rdi), %xmm2
; SSE-NEXT: movapd 48(%rdi), %xmm10
; SSE-NEXT: movapd 64(%rdi), %xmm15
; SSE-NEXT: movapd %xmm15, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm10[0],xmm14[1]
; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm4[0]
; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm15[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm11, %xmm4
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm6[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm1[0]
; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm11[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm12, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm7[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm5[0]
; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm12[0],xmm5[1]
; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm13, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm8[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm0[0]
; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm9[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm3[0]
; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 288(%rdi), %xmm2
; SSE-NEXT: movapd 304(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 320(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 336(%rdi), %xmm2
; SSE-NEXT: movapd 352(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 368(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 384(%rdi), %xmm2
; SSE-NEXT: movapd 400(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 416(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 432(%rdi), %xmm2
; SSE-NEXT: movapd 448(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 464(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 480(%rdi), %xmm2
; SSE-NEXT: movapd 496(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 512(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 528(%rdi), %xmm15
; SSE-NEXT: movapd 544(%rdi), %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 560(%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm0[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 576(%rdi), %xmm11
; SSE-NEXT: movapd 592(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm11[0],xmm14[1]
; SSE-NEXT: movapd 608(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 624(%rdi), %xmm8
; SSE-NEXT: movapd 640(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm13
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1]
; SSE-NEXT: movapd 656(%rdi), %xmm12
; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm12[0]
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm0[0],xmm12[1]
; SSE-NEXT: movapd 672(%rdi), %xmm0
; SSE-NEXT: movapd 688(%rdi), %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd 704(%rdi), %xmm10
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm10[0]
; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm2[0],xmm10[1]
; SSE-NEXT: movapd 720(%rdi), %xmm2
; SSE-NEXT: movapd 736(%rdi), %xmm4
; SSE-NEXT: movapd %xmm4, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT: movapd 752(%rdi), %xmm7
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm7[0]
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
; SSE-NEXT: movapd (%rdi), %xmm4
; SSE-NEXT: movapd 16(%rdi), %xmm6
; SSE-NEXT: movapd %xmm6, %xmm5
; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
; SSE-NEXT: movapd 32(%rdi), %xmm9
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm9[0]
; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm6[0],xmm9[1]
; SSE-NEXT: movapd %xmm1, 224(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 160(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 32(%rsi)
; SSE-NEXT: movapd %xmm3, 240(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 176(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 112(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 48(%rsi)
; SSE-NEXT: movapd %xmm14, 192(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 128(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 64(%rsi)
; SSE-NEXT: movapd %xmm5, (%rsi)
; SSE-NEXT: movapd %xmm13, 208(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 144(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 80(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 16(%rsi)
; SSE-NEXT: movapd %xmm0, 224(%rdx)
; SSE-NEXT: movapd %xmm2, 240(%rdx)
; SSE-NEXT: movapd %xmm11, 192(%rdx)
; SSE-NEXT: movapd %xmm8, 208(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rdx)
; SSE-NEXT: movapd %xmm15, 176(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%rdx)
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rdx)
; SSE-NEXT: movapd %xmm4, (%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rdx)
; SSE-NEXT: movapd %xmm7, 240(%rcx)
; SSE-NEXT: movapd %xmm10, 224(%rcx)
; SSE-NEXT: movapd %xmm12, 208(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rcx)
; SSE-NEXT: movapd %xmm9, (%rcx)
; SSE-NEXT: addq $408, %rsp # imm = 0x198
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf32:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: subq $296, %rsp # imm = 0x128
; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm8
; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm6
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm6[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm8[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = mem[0,1],ymm1[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd %ymm1, %ymm5
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm2[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm7[1],ymm6[0],ymm7[3],ymm6[2]
; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm10
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3]
; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm10 = ymm3[1],ymm8[0],ymm3[3],ymm8[2]
; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm10[0,1,2],ymm12[3]
; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm4[1],ymm5[0],ymm4[3],ymm5[2]
; AVX1-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm13
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm12[0,1,2],ymm13[3]
; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm13 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm14
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm13[0,1,2],ymm14[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd %ymm2, %ymm13
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm14 = ymm11[1],ymm2[0],ymm11[3],ymm2[2]
; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm15
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm0
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm15 = ymm3[1],ymm0[0],ymm3[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm9
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm15[0,1,2],ymm9[3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm9
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = mem[0,1],ymm9[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm4[1],ymm9[0],ymm4[3],ymm9[2]
; AVX1-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm10
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm1[0,1,2],ymm10[3]
; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = mem[0,1],ymm1[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm5[1],ymm1[0],ymm5[3],ymm1[2]
; AVX1-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm12
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm2[0,1,2],ymm12[3]
; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm2
; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm2, %ymm2
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm2[1],ymm7[2],ymm2[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2],ymm6[3]
; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm6
; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm6, %ymm6
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0],ymm6[1],ymm11[2],ymm6[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm6[0],ymm13[1],ymm6[2],ymm13[3]
; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm13
; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm13, %ymm13
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm6 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm6 = mem[0],ymm13[1],mem[2],ymm13[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm13[0],ymm8[1],ymm13[2],ymm8[3]
; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm13
; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm13, %ymm13
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm13[1],ymm3[2],ymm13[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm13[0],ymm0[1],ymm13[2],ymm0[3]
; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm13
; AVX1-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm13, %ymm13
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm2 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm2 = mem[0,1],ymm13[2,3],mem[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm14
; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm14, %ymm14
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm14[1],ymm4[2],ymm14[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm14[0],ymm9[1],ymm14[2],ymm9[3]
; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm14
; AVX1-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm14, %ymm14
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm0 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm15 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm15
; AVX1-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm15, %ymm15
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2],ymm15[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm15[0],ymm1[1],ymm15[2],ymm1[3]
; AVX1-ONLY-NEXT: vmovapd %ymm5, 192(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm4, 128(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm3, 64(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm11, (%rsi)
; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rsi)
; AVX1-ONLY-NEXT: vmovaps %ymm2, 160(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm6, 96(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm7, 32(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm12, 192(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm10, 128(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX1-ONLY-NEXT: vmovapd %ymm1, 192(%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm14, 224(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm9, 128(%rcx)
; AVX1-ONLY-NEXT: vmovaps %ymm13, 160(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm8, 64(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX1-ONLY-NEXT: addq $296, %rsp # imm = 0x128
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf32:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: subq $232, %rsp
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm13
; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm12
; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm11
; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm10
; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm9
; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm7
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm5
; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = ymm5[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = ymm7[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = ymm10[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = ymm12[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = ymm2[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm14
; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm15
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = ymm15[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = ymm0[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm1
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm5
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm7[2,3],ymm9[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm5
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm5
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm4[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm4[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 496(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm8 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 688(%rdi), %xmm9
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 592(%rdi), %xmm10
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm7 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 128(%rsi)
; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 224(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 160(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm3, 192(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm15, 128(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm13, 64(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm12, (%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm5, 160(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm3, 96(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm7, 192(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm9, 224(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm8, 128(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm2, 96(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm14, 32(%rcx)
; AVX2-ONLY-NEXT: addq $232, %rsp
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <0,3,6,9,12,15,u,u>
; AVX512-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = <1,4,7,10,13,u,u,u>
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = <10,13,0,3,6,u,u,u>
; AVX512-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
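; Note that the AVX512 lowering above needs no stack spills: each of the three
; output vectors is formed by a pair of two-source vpermt2q permutes, e.g.
; <0,3,6,9,12,15,u,u> gathers the stride-3 elements spanning two input zmm
; registers and [0,1,2,3,4,5,10,13] pulls the remaining two lanes from the third.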
%wide.vec = load <96 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93>
%strided.vec1 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94>
%strided.vec2 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95>
store <32 x i64> %strided.vec0, ptr %out.vec0, align 64
store <32 x i64> %strided.vec1, ptr %out.vec1, align 64
store <32 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
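; vf64 below repeats the vf32 pattern at twice the width: a <192 x i64> load of
; %in.vec deinterleaved into three <64 x i64> stride-3 slices.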
define void @load_i64_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf64:
; SSE: # %bb.0:
; SSE-NEXT: subq $1176, %rsp # imm = 0x498
; SSE-NEXT: movapd 272(%rdi), %xmm1
; SSE-NEXT: movapd 224(%rdi), %xmm2
; SSE-NEXT: movapd 176(%rdi), %xmm3
; SSE-NEXT: movapd 128(%rdi), %xmm4
; SSE-NEXT: movapd 80(%rdi), %xmm5
; SSE-NEXT: movapd 240(%rdi), %xmm6
; SSE-NEXT: movapd 256(%rdi), %xmm11
; SSE-NEXT: movapd 192(%rdi), %xmm7
; SSE-NEXT: movapd 208(%rdi), %xmm12
; SSE-NEXT: movapd 144(%rdi), %xmm8
; SSE-NEXT: movapd 160(%rdi), %xmm13
; SSE-NEXT: movapd 96(%rdi), %xmm9
; SSE-NEXT: movapd 112(%rdi), %xmm14
; SSE-NEXT: movapd 48(%rdi), %xmm10
; SSE-NEXT: movapd 64(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm10[0],xmm15[1]
; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm5[0]
; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm14, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm9[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm4[0]
; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm14[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm13, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm8[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm3[0]
; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm12, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm2[0]
; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm12[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm11, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm1[0]
; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm11[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 288(%rdi), %xmm2
; SSE-NEXT: movapd 304(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 320(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 336(%rdi), %xmm2
; SSE-NEXT: movapd 352(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 368(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 384(%rdi), %xmm2
; SSE-NEXT: movapd 400(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 416(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 432(%rdi), %xmm2
; SSE-NEXT: movapd 448(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 464(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 480(%rdi), %xmm2
; SSE-NEXT: movapd 496(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 512(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 528(%rdi), %xmm2
; SSE-NEXT: movapd 544(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 560(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 576(%rdi), %xmm2
; SSE-NEXT: movapd 592(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 608(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 624(%rdi), %xmm2
; SSE-NEXT: movapd 640(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 656(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 672(%rdi), %xmm2
; SSE-NEXT: movapd 688(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 704(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 720(%rdi), %xmm2
; SSE-NEXT: movapd 736(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 752(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 768(%rdi), %xmm2
; SSE-NEXT: movapd 784(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 800(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 816(%rdi), %xmm2
; SSE-NEXT: movapd 832(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 848(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 864(%rdi), %xmm2
; SSE-NEXT: movapd 880(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 896(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 912(%rdi), %xmm2
; SSE-NEXT: movapd 928(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 944(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 960(%rdi), %xmm2
; SSE-NEXT: movapd 976(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 992(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1008(%rdi), %xmm2
; SSE-NEXT: movapd 1024(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1040(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1056(%rdi), %xmm2
; SSE-NEXT: movapd 1072(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1088(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1104(%rdi), %xmm2
; SSE-NEXT: movapd 1120(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1136(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1152(%rdi), %xmm2
; SSE-NEXT: movapd 1168(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1184(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1200(%rdi), %xmm2
; SSE-NEXT: movapd 1216(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1232(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1248(%rdi), %xmm2
; SSE-NEXT: movapd 1264(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm2[0],xmm15[1]
; SSE-NEXT: movapd 1280(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1296(%rdi), %xmm14
; SSE-NEXT: movapd 1312(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm11
; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm14[0],xmm11[1]
; SSE-NEXT: movapd 1328(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1344(%rdi), %xmm12
; SSE-NEXT: movapd 1360(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm12[0],xmm8[1]
; SSE-NEXT: movapd 1376(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movapd 1392(%rdi), %xmm9
; SSE-NEXT: movapd 1408(%rdi), %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm9[0],xmm3[1]
; SSE-NEXT: movapd 1424(%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm0[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1440(%rdi), %xmm4
; SSE-NEXT: movapd 1456(%rdi), %xmm7
; SSE-NEXT: movapd %xmm7, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
; SSE-NEXT: movapd 1472(%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm7[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1488(%rdi), %xmm1
; SSE-NEXT: movapd 1504(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm6
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1]
; SSE-NEXT: movapd 1520(%rdi), %xmm13
; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm13[0]
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
; SSE-NEXT: movapd (%rdi), %xmm10
; SSE-NEXT: movapd 16(%rdi), %xmm5
; SSE-NEXT: movapd %xmm5, %xmm7
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm10[0],xmm7[1]
; SSE-NEXT: movapd 32(%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm0[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, %xmm5
; SSE-NEXT: movapd %xmm6, 496(%rsi)
; SSE-NEXT: movapd %xmm2, 480(%rsi)
; SSE-NEXT: movapd %xmm3, 464(%rsi)
; SSE-NEXT: movapd %xmm8, 448(%rsi)
; SSE-NEXT: movapd %xmm11, 432(%rsi)
; SSE-NEXT: movapd %xmm15, 416(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 400(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 352(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 304(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 272(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 224(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rsi)
; SSE-NEXT: movapd %xmm7, (%rsi)
; SSE-NEXT: movapd %xmm1, 496(%rdx)
; SSE-NEXT: movapd %xmm4, 480(%rdx)
; SSE-NEXT: movapd %xmm9, 464(%rdx)
; SSE-NEXT: movapd %xmm12, 448(%rdx)
; SSE-NEXT: movapd %xmm14, 432(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 416(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 400(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 352(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 304(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 272(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 224(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rdx)
; SSE-NEXT: movapd %xmm10, (%rdx)
; SSE-NEXT: movapd %xmm13, 496(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 480(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 464(%rcx)
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 448(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 432(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 416(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 400(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 352(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 304(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 272(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 224(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rcx)
; SSE-NEXT: movapd %xmm5, (%rcx)
; SSE-NEXT: addq $1176, %rsp # imm = 0x498
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i64_stride3_vf64:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: subq $1128, %rsp # imm = 0x468
; AVX1-ONLY-NEXT: vmovapd 896(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm1
; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm2
; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm3
; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = mem[0,1],ymm4[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm3[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm2[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = mem[0,1],ymm1[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd %ymm1, %ymm9
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd %ymm0, %ymm11
; AVX1-ONLY-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[1],ymm4[0],ymm5[3],ymm4[2]
; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm6[1],ymm3[0],ymm6[3],ymm3[2]
; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[1],ymm2[0],ymm7[3],ymm2[2]
; AVX1-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm8[1],ymm9[0],ymm8[3],ymm9[2]
; AVX1-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm10[1],ymm11[0],ymm10[3],ymm11[2]
; AVX1-ONLY-NEXT: vbroadcastsd 944(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm11
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm11[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm11[0],ymm0[3],ymm11[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1136(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 1280(%rdi), %ymm8
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm8[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm8[0],ymm0[3],ymm8[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1328(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 1472(%rdi), %ymm5
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm5[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm5[0],ymm0[3],ymm5[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1520(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm0
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm10
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm10[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm10[0],ymm0[3],ymm10[2]
; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm9
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm9[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm9[0],ymm0[3],ymm9[2]
; AVX1-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm7
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm7[2,3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm7[0],ymm0[3],ymm7[2]
; AVX1-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 800(%rdi), %ymm4
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = mem[0,1],ymm4[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm12[1],ymm4[0],ymm12[3],ymm4[2]
; AVX1-ONLY-NEXT: vbroadcastsd 848(%rdi), %ymm6
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 992(%rdi), %ymm3
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm3[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm6[1],ymm3[0],ymm6[3],ymm3[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1040(%rdi), %ymm15
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 1184(%rdi), %ymm2
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = mem[0,1],ymm2[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm15[1],ymm2[0],ymm15[3],ymm2[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1232(%rdi), %ymm14
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovapd 1376(%rdi), %ymm0
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = mem[0,1],ymm0[2,3]
; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm14[1],ymm0[0],ymm14[3],ymm0[2]
; AVX1-ONLY-NEXT: vbroadcastsd 1424(%rdi), %ymm13
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm13[3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3],ymm1[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3],ymm1[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3],ymm1[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3],ymm1[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 880(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 928(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm13 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3],ymm1[4,5],ymm13[6,7]
; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 1072(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 1120(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm13 = mem[0],ymm1[1],mem[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2],ymm11[3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 1264(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 1312(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm11 = mem[0],ymm1[1],mem[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2],ymm8[3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 1456(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 1504(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm8 = mem[0],ymm1[1],mem[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2],ymm5[3]
; AVX1-ONLY-NEXT: vmovupd %ymm1, (%rsp) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 1360(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 1408(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm14[0],ymm1[1],ymm14[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 1168(%rdi), %xmm0
; AVX1-ONLY-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = ymm15[0],ymm0[1],ymm15[2],ymm0[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm0
; AVX1-ONLY-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm0[1],ymm6[2],ymm0[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm0
; AVX1-ONLY-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm4[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm1
; AVX1-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm4 = mem[0],ymm1[1],mem[2],ymm1[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm1[0],ymm7[1],ymm1[2],ymm7[3]
; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm7
; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm7, %ymm7
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm2 = mem[0],ymm7[1],mem[2],ymm7[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2],ymm9[3]
; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm9
; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm9, %ymm9
; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm1 = mem[0],ymm9[1],mem[2],ymm9[3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1],mem[2,3]
; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2],ymm10[3]
; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm10
; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm10, %ymm10
; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
; AVX1-ONLY-NEXT: # ymm0 = mem[0,1],ymm10[2,3],mem[4,5],ymm10[6,7]
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm14[2,3],ymm10[4,5],ymm14[6,7]
; AVX1-ONLY-NEXT: vmovapd %ymm5, 448(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm15, 384(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm6, 320(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm12, 256(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm4, 192(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm2, 128(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm1, 64(%rsi)
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm8, 480(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm11, 416(%rsi)
; AVX1-ONLY-NEXT: vmovapd %ymm13, 352(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rdx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm9, 64(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm7, 128(%rcx)
; AVX1-ONLY-NEXT: vmovapd %ymm3, 192(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rcx)
; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX1-ONLY-NEXT: addq $1128, %rsp # imm = 0x468
; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-ONLY-LABEL: load_i64_stride3_vf64:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: subq $968, %rsp # imm = 0x3C8
; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm5
; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm6
; AVX2-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm7
; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm8
; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm9
; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm10
; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %ymm10
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm10[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %ymm9
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm9[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm8
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm8[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm7
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm7[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm6
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm6[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm14
; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm5
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm5[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm4
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm3
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm15
; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %ymm13
; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %ymm1
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm1[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm11
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %ymm12
; AVX2-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %ymm0
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm11 = ymm0[0,3,2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-ONLY-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 944(%rdi), %ymm12
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm10 = mem[0,1],ymm10[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1136(%rdi), %ymm11
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm9 = mem[0,1],ymm9[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1328(%rdi), %ymm10
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm8 = mem[0,1],ymm8[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1520(%rdi), %ymm9
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm7 = mem[0,1],ymm7[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm8
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm6 = mem[0,1],ymm6[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm7
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm14[0,1],ymm5[2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm6
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, (%rsp), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm4 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm5
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm4, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm3 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 848(%rdi), %ymm4
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm15[0,1],ymm2[2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1040(%rdi), %ymm3
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1232(%rdi), %ymm2
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-ONLY-NEXT: vbroadcastsd 1424(%rdi), %ymm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 496(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 688(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 880(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 1072(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 1264(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 1456(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 1360(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 1168(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 976(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 784(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 592(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm15 = mem[0,1,0,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 448(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 384(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 320(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 256(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 192(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 128(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 64(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, (%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 480(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 416(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 352(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 288(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 224(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 160(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 96(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm15, 32(%rsi)
; AVX2-ONLY-NEXT: vmovaps %ymm11, 448(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm13, 384(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 320(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 256(%rdx)
; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 192(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 128(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 64(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, (%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 480(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 416(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 352(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 288(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 160(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 96(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm11, 32(%rdx)
; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm2, 128(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm3, 192(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm4, 256(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm5, 320(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm6, 384(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm7, 448(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm8, 480(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm9, 416(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm10, 352(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm12, 288(%rcx)
; AVX2-ONLY-NEXT: vmovaps %ymm14, 224(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-ONLY-NEXT: addq $968, %rsp # imm = 0x3C8
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf64:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $200, %rsp
; AVX512-NEXT: vmovaps 1472(%rdi), %zmm0
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512-NEXT: vmovdqa64 1344(%rdi), %zmm16
; AVX512-NEXT: vmovaps 1280(%rdi), %zmm0
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 1216(%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 1152(%rdi), %zmm15
; AVX512-NEXT: vmovaps 1088(%rdi), %zmm1
; AVX512-NEXT: vmovups %zmm1, (%rsp) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 1024(%rdi), %zmm18
; AVX512-NEXT: vmovdqa64 960(%rdi), %zmm22
; AVX512-NEXT: vmovaps 896(%rdi), %zmm1
; AVX512-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm17
; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm21
; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm12
; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm28
; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm23
; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm27
; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm20
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm19
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm13 = <0,3,6,9,12,15,u,u>
; AVX512-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512-NEXT: vpermt2q %zmm20, %zmm13, %zmm6
; AVX512-NEXT: vmovdqa64 %zmm27, %zmm3
; AVX512-NEXT: vpermt2q %zmm23, %zmm13, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm28, %zmm2
; AVX512-NEXT: vpermt2q %zmm12, %zmm13, %zmm2
; AVX512-NEXT: vmovdqa64 %zmm21, %zmm9
; AVX512-NEXT: vpermt2q %zmm17, %zmm13, %zmm9
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512-NEXT: vpermt2q %zmm18, %zmm13, %zmm8
; AVX512-NEXT: vmovdqa64 %zmm15, %zmm24
; AVX512-NEXT: vpermt2q %zmm0, %zmm13, %zmm24
; AVX512-NEXT: vmovdqa64 %zmm16, %zmm25
; AVX512-NEXT: vpermt2q %zmm14, %zmm13, %zmm25
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm26 = <1,4,7,10,13,u,u,u>
; AVX512-NEXT: vmovdqa64 %zmm30, %zmm29
; AVX512-NEXT: vpermt2q %zmm20, %zmm26, %zmm29
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = <10,13,0,3,6,u,u,u>
; AVX512-NEXT: vpermt2q %zmm30, %zmm31, %zmm20
; AVX512-NEXT: vmovdqa64 %zmm28, %zmm30
; AVX512-NEXT: vpermt2q %zmm12, %zmm26, %zmm30
; AVX512-NEXT: vpermt2q %zmm28, %zmm31, %zmm12
; AVX512-NEXT: vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512-NEXT: vpermt2q %zmm23, %zmm26, %zmm28
; AVX512-NEXT: vpermt2q %zmm27, %zmm31, %zmm23
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm27
; AVX512-NEXT: vpermt2q %zmm18, %zmm26, %zmm27
; AVX512-NEXT: vpermt2q %zmm22, %zmm31, %zmm18
; AVX512-NEXT: vmovdqa64 %zmm21, %zmm22
; AVX512-NEXT: vpermt2q %zmm17, %zmm26, %zmm22
; AVX512-NEXT: vpermt2q %zmm21, %zmm31, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm16, %zmm21
; AVX512-NEXT: vpermt2q %zmm14, %zmm26, %zmm21
; AVX512-NEXT: vpermt2q %zmm16, %zmm31, %zmm14
; AVX512-NEXT: vmovdqa64 %zmm15, %zmm16
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm12
; AVX512-NEXT: vpermt2q %zmm0, %zmm26, %zmm16
; AVX512-NEXT: vpermt2q %zmm15, %zmm31, %zmm12
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm15
; AVX512-NEXT: vpermi2q %zmm15, %zmm19, %zmm13
; AVX512-NEXT: vpermi2q %zmm15, %zmm19, %zmm26
; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm15
; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm19
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vmovdqa64 %zmm6, %zmm7
; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm7
; AVX512-NEXT: vmovdqa64 %zmm3, %zmm11
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm11
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm10
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm10
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm9
; AVX512-NEXT: vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm8
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm24
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm25
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm13
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm16
; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm29
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm28
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm27
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm22
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm21
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm26
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm20
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm19
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm23
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm12
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm15
; AVX512-NEXT: vmovdqa64 %zmm25, 448(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm24, 384(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm8, 320(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm9, 256(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm10, 192(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm11, 128(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm13, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm21, 448(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm22, 256(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm27, 320(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm28, 128(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm26, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm29, 64(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm16, 384(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm12, 384(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm17, 256(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm18, 320(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm23, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm19, 192(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm15, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm20, 64(%rcx)
; AVX512-NEXT: addq $200, %rsp
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
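; The IR below performs a stride-3 deinterleave: a single <192 x i64> load is
; split via shufflevector into three <64 x i64> results taking elements
; 0,3,6,..., 1,4,7,..., and 2,5,8,... respectively, each stored to its own
; output pointer.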
%wide.vec = load <192 x i64>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
%strided.vec1 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
%strided.vec2 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95, i32 98, i32 101, i32 104, i32 107, i32 110, i32 113, i32 116, i32 119, i32 122, i32 125, i32 128, i32 131, i32 134, i32 137, i32 140, i32 143, i32 146, i32 149, i32 152, i32 155, i32 158, i32 161, i32 164, i32 167, i32 170, i32 173, i32 176, i32 179, i32 182, i32 185, i32 188, i32 191>
store <64 x i64> %strided.vec0, ptr %out.vec0, align 64
store <64 x i64> %strided.vec1, ptr %out.vec1, align 64
store <64 x i64> %strided.vec2, ptr %out.vec2, align 64
ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX: {{.*}}
; AVX1: {{.*}}
; AVX2: {{.*}}
; AVX2-FAST: {{.*}}
; AVX2-FAST-PERLANE: {{.*}}
; AVX2-SLOW: {{.*}}
; AVX512BW: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; FALLBACK0: {{.*}}
; FALLBACK1: {{.*}}
; FALLBACK10: {{.*}}
; FALLBACK11: {{.*}}
; FALLBACK12: {{.*}}
; FALLBACK2: {{.*}}
; FALLBACK3: {{.*}}
; FALLBACK4: {{.*}}
; FALLBACK5: {{.*}}
; FALLBACK6: {{.*}}
; FALLBACK7: {{.*}}
; FALLBACK8: {{.*}}
; FALLBACK9: {{.*}}