; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=haswell | FileCheck %s --check-prefixes=ALL,HSW
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL

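; Constant mask with a single false lane (lane 11) and an undef passthru:
; the AVX-512 targets fold this into one zero-masked vexpandps with the
; immediate mask 0xF7FF (bit 11 clear), while Haswell lowers it to plain
; contiguous loads at folded offsets (44(%rdi) = float index 11).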
define <16 x float> @expandload_v16f32_const_undef(float* %base) {
; HSW-LABEL: expandload_v16f32_const_undef:
; HSW: # %bb.0: # %cond.load
; HSW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; HSW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; HSW-NEXT: vinsertf128 $1, 44(%rdi), %ymm0, %ymm1
; HSW-NEXT: vmovups (%rdi), %ymm0
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v16f32_const_undef:
; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f32_const_undef:
; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; KNL-NEXT: retq
  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
  ret <16 x float> %res
}

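; Same expanding load with a real passthru and lanes 11 and 15 false: the
; AVX-512 targets merge into %src0 with a merge-masked vexpandps (imm mask
; 0x77FF), while Haswell blends each loaded element into the passthru.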
define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
; HSW-LABEL: expandload_v16f32_const:
; HSW: # %bb.0: # %cond.load
; HSW-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; HSW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],mem[0],xmm0[2,3]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
; HSW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; HSW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; HSW-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; HSW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7]
; HSW-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm3
; HSW-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3]
; HSW-NEXT: vinsertps {{.*#+}} xmm3 = xmm1[0],mem[0],xmm1[2,3]
; HSW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],mem[0],xmm3[3]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; HSW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; HSW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; HSW-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v16f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f32_const:
; KNL: # %bb.0:
; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: retq
  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
  ret <16 x float> %res
}

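; Variable <8 x i1> mask: Haswell scalarizes into a vpextrb/testb branch
; per lane, advancing %rdi by 8 after each executed conditional load. SKX
; converts the i1 vector to a k-register via vpsllw+vpmovw2m; KNL (no
; AVX512BW) widens to <8 x i64> and uses vpsllq+vptestmq instead.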
define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
; HSW-LABEL: expandload_v8f64_v8i1:
; HSW: # %bb.0:
; HSW-NEXT: vpextrb $0, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_2
; HSW-NEXT: # %bb.1: # %cond.load
; HSW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; HSW-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_2: # %else
; HSW-NEXT: vpextrb $2, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_4
; HSW-NEXT: # %bb.3: # %cond.load1
; HSW-NEXT: vmovhpd {{.*#+}} xmm3 = xmm0[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_4: # %else2
; HSW-NEXT: vpextrb $4, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_6
; HSW-NEXT: # %bb.5: # %cond.load5
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm3
; HSW-NEXT: vmovlpd {{.*#+}} xmm3 = mem[0],xmm3[1]
; HSW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_6: # %else6
; HSW-NEXT: vpextrb $6, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_8
; HSW-NEXT: # %bb.7: # %cond.load9
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm3
; HSW-NEXT: vmovhpd {{.*#+}} xmm3 = xmm3[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_8: # %else10
; HSW-NEXT: vpextrb $8, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_10
; HSW-NEXT: # %bb.9: # %cond.load13
; HSW-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; HSW-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_10: # %else14
; HSW-NEXT: vpextrb $10, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_12
; HSW-NEXT: # %bb.11: # %cond.load17
; HSW-NEXT: vmovhpd {{.*#+}} xmm3 = xmm1[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_12: # %else18
; HSW-NEXT: vpextrb $12, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_14
; HSW-NEXT: # %bb.13: # %cond.load21
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm3
; HSW-NEXT: vmovlpd {{.*#+}} xmm3 = mem[0],xmm3[1]
; HSW-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB2_14: # %else22
; HSW-NEXT: vpextrb $14, %xmm2, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB2_16
; HSW-NEXT: # %bb.15: # %cond.load25
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm2
; HSW-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; HSW-NEXT: .LBB2_16: # %else26
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v8f64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v8f64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; KNL-NEXT: retq
  %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
  ret <8 x double> %res
}

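; Constant <4 x i1> mask 0b0111: SKX can use the 128-bit vexpandps
; directly; KNL lacks AVX512VL, so the operation is widened to zmm (see
; the surrounding kill comments).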
define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
; HSW-LABEL: expandload_v4f32_const:
; HSW: # %bb.0: # %cond.load
; HSW-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; HSW-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; HSW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; HSW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v4f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v4f32_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
  %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
  ret <4 x float> %res
}

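; Mask <false, true> loads a single element, so Haswell needs nothing more
; than a vpinsrq of (%rdi) into lane 1; the AVX-512 targets still go
; through vpexpandq with mask 0b10.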
define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
; HSW-LABEL: expandload_v2i64_const:
; HSW: # %bb.0: # %else
; HSW-NEXT: vpinsrq $1, (%rdi), %xmm0, %xmm0
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v2i64_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v2i64_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
  %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
  ret <2 x i64> %res
}

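; Mask computed from an icmp of a <2 x i32> trigger: the trigger is first
; promoted to <2 x i64> (blending the odd 32-bit lanes with zero) before
; the compare. KNL performs the compare at zmm width and trims the
; k-register to two bits with kshiftlw/kshiftrw.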
define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
; HSW-LABEL: expandload_v2f32_v2i1:
; HSW: # %bb.0:
; HSW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; HSW-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; HSW-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; HSW-NEXT: vpextrb $0, %xmm1, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB5_2
; HSW-NEXT: # %bb.1: # %cond.load
; HSW-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; HSW-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB5_2: # %else
; HSW-NEXT: vpextrb $8, %xmm1, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB5_4
; HSW-NEXT: # %bb.3: # %cond.load1
; HSW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; HSW-NEXT: .LBB5_4: # %else2
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v2f32_v2i1:
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v2f32_v2i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
; KNL-NEXT: kshiftrw $14, %k0, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
  ret <2 x float> %res
}

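; 32 elements do not fit in one zmm, so the AVX-512 lowering splits the
; load into two vexpandps ops: the second one's base address is offset by
; the popcount of the first half's mask, i.e. the number of floats the
; first expandload consumed. Haswell again fully scalarizes all 32 lanes.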
define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
; HSW-LABEL: expandload_v32f32_v32i32:
; HSW: # %bb.0:
; HSW-NEXT: vpxor %xmm8, %xmm8, %xmm8
; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm9
; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9
; HSW-NEXT: vpextrb $0, %xmm9, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_2
; HSW-NEXT: # %bb.1: # %cond.load
; HSW-NEXT: vmovd {{.*#+}} xmm9 = mem[0],zero,zero,zero
; HSW-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0],ymm0[1,2,3,4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_2: # %else
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm8
; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8
; HSW-NEXT: vpextrb $1, %xmm8, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_4
; HSW-NEXT: # %bb.3: # %cond.load1
; HSW-NEXT: vinsertps {{.*#+}} xmm8 = xmm0[0],mem[0],xmm0[2,3]
; HSW-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_4: # %else2
; HSW-NEXT: vxorps %xmm8, %xmm8, %xmm8
; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm9
; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9
; HSW-NEXT: vpextrb $2, %xmm9, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_6
; HSW-NEXT: # %bb.5: # %cond.load5
; HSW-NEXT: vinsertps {{.*#+}} xmm9 = xmm0[0,1],mem[0],xmm0[3]
; HSW-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_6: # %else6
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm8
; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8
; HSW-NEXT: vpextrb $3, %xmm8, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_8
; HSW-NEXT: # %bb.7: # %cond.load9
; HSW-NEXT: vinsertps {{.*#+}} xmm8 = xmm0[0,1,2],mem[0]
; HSW-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_8: # %else10
; HSW-NEXT: vxorps %xmm8, %xmm8, %xmm8
; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8
; HSW-NEXT: vextracti128 $1, %ymm8, %xmm8
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm9
; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9
; HSW-NEXT: vpextrb $4, %xmm9, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_10
; HSW-NEXT: # %bb.9: # %cond.load13
; HSW-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm10
; HSW-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0],xmm10[1,2,3]
; HSW-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm0
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_10: # %else14
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm8
; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8
; HSW-NEXT: vpextrb $5, %xmm8, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_12
; HSW-NEXT: # %bb.11: # %cond.load17
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm8
; HSW-NEXT: vinsertps {{.*#+}} xmm8 = xmm8[0],mem[0],xmm8[2,3]
; HSW-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm0
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_12: # %else18
; HSW-NEXT: vxorps %xmm8, %xmm8, %xmm8
; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm8
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $6, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_14
; HSW-NEXT: # %bb.13: # %cond.load21
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],mem[0],xmm4[3]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_14: # %else22
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $7, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_16
; HSW-NEXT: # %bb.15: # %cond.load25
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_16: # %else26
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm8
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $8, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_18
; HSW-NEXT: # %bb.17: # %cond.load29
; HSW-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; HSW-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_18: # %else30
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $9, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_20
; HSW-NEXT: # %bb.19: # %cond.load33
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm1[0],mem[0],xmm1[2,3]
; HSW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_20: # %else34
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm8
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $10, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_22
; HSW-NEXT: # %bb.21: # %cond.load37
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm1[0,1],mem[0],xmm1[3]
; HSW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_22: # %else38
; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $11, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_24
; HSW-NEXT: # %bb.23: # %cond.load41
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm1[0,1,2],mem[0]
; HSW-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_24: # %else42
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm8
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $12, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_26
; HSW-NEXT: # %bb.25: # %cond.load45
; HSW-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm4
; HSW-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0],xmm4[1,2,3]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_26: # %else46
; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $13, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_28
; HSW-NEXT: # %bb.27: # %cond.load49
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],mem[0],xmm4[2,3]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_28: # %else50
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $14, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_30
; HSW-NEXT: # %bb.29: # %cond.load53
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm5
; HSW-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],mem[0],xmm5[3]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_30: # %else54
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $15, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_32
; HSW-NEXT: # %bb.31: # %cond.load57
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_32: # %else58
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm6, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm5
; HSW-NEXT: vpacksswb %xmm0, %xmm5, %xmm5
; HSW-NEXT: vpextrb $0, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_34
; HSW-NEXT: # %bb.33: # %cond.load61
; HSW-NEXT: vmovd {{.*#+}} xmm5 = mem[0],zero,zero,zero
; HSW-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0],ymm2[1,2,3,4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_34: # %else62
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $1, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_36
; HSW-NEXT: # %bb.35: # %cond.load65
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm2[0],mem[0],xmm2[2,3]
; HSW-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_36: # %else66
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm6, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm5
; HSW-NEXT: vpacksswb %xmm0, %xmm5, %xmm5
; HSW-NEXT: vpextrb $2, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_38
; HSW-NEXT: # %bb.37: # %cond.load69
; HSW-NEXT: vinsertps {{.*#+}} xmm5 = xmm2[0,1],mem[0],xmm2[3]
; HSW-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_38: # %else70
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $3, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_40
; HSW-NEXT: # %bb.39: # %cond.load73
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm2[0,1,2],mem[0]
; HSW-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_40: # %else74
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm6, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm0, %xmm5, %xmm5
; HSW-NEXT: vpextrb $4, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_42
; HSW-NEXT: # %bb.41: # %cond.load77
; HSW-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm5
; HSW-NEXT: vblendps {{.*#+}} xmm5 = xmm8[0],xmm5[1,2,3]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_42: # %else78
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $5, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_44
; HSW-NEXT: # %bb.43: # %cond.load81
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],mem[0],xmm4[2,3]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_44: # %else82
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm6, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm0, %xmm5, %xmm5
; HSW-NEXT: vpextrb $6, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_46
; HSW-NEXT: # %bb.45: # %cond.load85
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm5
; HSW-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],mem[0],xmm5[3]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_46: # %else86
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $7, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_48
; HSW-NEXT: # %bb.47: # %cond.load89
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_48: # %else90
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm7, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $8, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_50
; HSW-NEXT: # %bb.49: # %cond.load93
; HSW-NEXT: vmovd {{.*#+}} xmm5 = mem[0],zero,zero,zero
; HSW-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1,2,3,4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_50: # %else94
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $9, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_52
; HSW-NEXT: # %bb.51: # %cond.load97
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm3[0],mem[0],xmm3[2,3]
; HSW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_52: # %else98
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm7, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $10, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_54
; HSW-NEXT: # %bb.53: # %cond.load101
; HSW-NEXT: vinsertps {{.*#+}} xmm5 = xmm3[0,1],mem[0],xmm3[3]
; HSW-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_54: # %else102
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $11, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_56
; HSW-NEXT: # %bb.55: # %cond.load105
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm3[0,1,2],mem[0]
; HSW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_56: # %else106
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm7, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $12, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_58
; HSW-NEXT: # %bb.57: # %cond.load109
; HSW-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm6
; HSW-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],xmm6[1,2,3]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_58: # %else110
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $13, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_60
; HSW-NEXT: # %bb.59: # %cond.load113
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],mem[0],xmm4[2,3]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_60: # %else114
; HSW-NEXT: vxorps %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm7, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $14, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_62
; HSW-NEXT: # %bb.61: # %cond.load117
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm5
; HSW-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],mem[0],xmm5[3]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; HSW-NEXT: addq $4, %rdi
; HSW-NEXT: .LBB6_62: # %else118
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $15, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB6_64
; HSW-NEXT: # %bb.63: # %cond.load121
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm4
; HSW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; HSW-NEXT: .LBB6_64: # %else122
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v32f32_v32i32:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm3, %zmm3, %k1
; SKX-NEXT: vptestnmd %zmm2, %zmm2, %k2
; SKX-NEXT: kmovw %k2, %eax
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vexpandps (%rdi,%rax,4), %zmm1 {%k1}
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k2}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v32f32_v32i32:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: popcntl %eax, %eax
; KNL-NEXT: vexpandps (%rdi,%rax,4), %zmm1 {%k1}
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k2}
; KNL-NEXT: retq
  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
  %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
  ret <32 x float> %res
}

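; The same split-in-two pattern for <16 x double>, scaling the popcount by
; 8 for the second vexpandpd. KNL compares the <8 x i32> halves at zmm
; width, so it must movzbl the kmovw result down to the 8 live mask bits
; before the popcount.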
define <16 x double> @expandload_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
; HSW-LABEL: expandload_v16f64_v16i32:
; HSW: # %bb.0:
; HSW-NEXT: vpxor %xmm6, %xmm6, %xmm6
; HSW-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm6
; HSW-NEXT: vpackssdw %xmm0, %xmm6, %xmm7
; HSW-NEXT: vpacksswb %xmm0, %xmm7, %xmm7
; HSW-NEXT: vpextrb $0, %xmm7, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_2
; HSW-NEXT: # %bb.1: # %cond.load
; HSW-NEXT: vmovq {{.*#+}} xmm7 = mem[0],zero
; HSW-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3,4,5,6,7]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_2: # %else
; HSW-NEXT: vpackssdw %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpacksswb %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpextrb $1, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_4
; HSW-NEXT: # %bb.3: # %cond.load1
; HSW-NEXT: vmovhpd {{.*#+}} xmm6 = xmm0[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_4: # %else2
; HSW-NEXT: vxorpd %xmm6, %xmm6, %xmm6
; HSW-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm6
; HSW-NEXT: vpackssdw %xmm0, %xmm6, %xmm7
; HSW-NEXT: vpacksswb %xmm0, %xmm7, %xmm7
; HSW-NEXT: vpextrb $2, %xmm7, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_6
; HSW-NEXT: # %bb.5: # %cond.load5
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm7
; HSW-NEXT: vmovlpd {{.*#+}} xmm7 = mem[0],xmm7[1]
; HSW-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_6: # %else6
; HSW-NEXT: vpackssdw %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpacksswb %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpextrb $3, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_8
; HSW-NEXT: # %bb.7: # %cond.load9
; HSW-NEXT: vextractf128 $1, %ymm0, %xmm6
; HSW-NEXT: vmovhpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_8: # %else10
; HSW-NEXT: vxorpd %xmm6, %xmm6, %xmm6
; HSW-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm6
; HSW-NEXT: vextracti128 $1, %ymm6, %xmm6
; HSW-NEXT: vpackssdw %xmm6, %xmm0, %xmm7
; HSW-NEXT: vpacksswb %xmm0, %xmm7, %xmm7
; HSW-NEXT: vpextrb $4, %xmm7, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_10
; HSW-NEXT: # %bb.9: # %cond.load13
; HSW-NEXT: vmovq {{.*#+}} xmm7 = mem[0],zero
; HSW-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3,4,5,6,7]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_10: # %else14
; HSW-NEXT: vpackssdw %xmm6, %xmm0, %xmm6
; HSW-NEXT: vpacksswb %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpextrb $5, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_12
; HSW-NEXT: # %bb.11: # %cond.load17
; HSW-NEXT: vmovhpd {{.*#+}} xmm6 = xmm1[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_12: # %else18
; HSW-NEXT: vxorpd %xmm6, %xmm6, %xmm6
; HSW-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm6
; HSW-NEXT: vpacksswb %xmm0, %xmm6, %xmm6
; HSW-NEXT: vpextrb $6, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_14
; HSW-NEXT: # %bb.13: # %cond.load21
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm6
; HSW-NEXT: vmovlpd {{.*#+}} xmm6 = mem[0],xmm6[1]
; HSW-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_14: # %else22
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpextrb $7, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_16
; HSW-NEXT: # %bb.15: # %cond.load25
; HSW-NEXT: vextractf128 $1, %ymm1, %xmm4
; HSW-NEXT: vmovhpd {{.*#+}} xmm4 = xmm4[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_16: # %else26
; HSW-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm6
; HSW-NEXT: vpacksswb %xmm6, %xmm0, %xmm6
; HSW-NEXT: vpextrb $8, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_18
; HSW-NEXT: # %bb.17: # %cond.load29
; HSW-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero
; HSW-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3,4,5,6,7]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_18: # %else30
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $9, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_20
; HSW-NEXT: # %bb.19: # %cond.load33
; HSW-NEXT: vmovhpd {{.*#+}} xmm4 = xmm2[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_20: # %else34
; HSW-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm6
; HSW-NEXT: vpacksswb %xmm6, %xmm0, %xmm6
; HSW-NEXT: vpextrb $10, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_22
; HSW-NEXT: # %bb.21: # %cond.load37
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm6
; HSW-NEXT: vmovlpd {{.*#+}} xmm6 = mem[0],xmm6[1]
; HSW-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_22: # %else38
; HSW-NEXT: vpackssdw %xmm0, %xmm4, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $11, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_24
; HSW-NEXT: # %bb.23: # %cond.load41
; HSW-NEXT: vextractf128 $1, %ymm2, %xmm4
; HSW-NEXT: vmovhpd {{.*#+}} xmm4 = xmm4[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_24: # %else42
; HSW-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm6
; HSW-NEXT: vpacksswb %xmm6, %xmm0, %xmm6
; HSW-NEXT: vpextrb $12, %xmm6, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_26
; HSW-NEXT: # %bb.25: # %cond.load45
; HSW-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero
; HSW-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3,4,5,6,7]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_26: # %else46
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $13, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_28
; HSW-NEXT: # %bb.27: # %cond.load49
; HSW-NEXT: vmovhpd {{.*#+}} xmm4 = xmm3[0],mem[0]
; HSW-NEXT: vblendpd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3]
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_28: # %else50
; HSW-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; HSW-NEXT: vpcmpeqd %ymm4, %ymm5, %ymm4
; HSW-NEXT: vextracti128 $1, %ymm4, %xmm4
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm5
; HSW-NEXT: vpacksswb %xmm5, %xmm0, %xmm5
; HSW-NEXT: vpextrb $14, %xmm5, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_30
; HSW-NEXT: # %bb.29: # %cond.load53
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm5
; HSW-NEXT: vmovlpd {{.*#+}} xmm5 = mem[0],xmm5[1]
; HSW-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; HSW-NEXT: addq $8, %rdi
; HSW-NEXT: .LBB7_30: # %else54
; HSW-NEXT: vpackssdw %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4
; HSW-NEXT: vpextrb $15, %xmm4, %eax
; HSW-NEXT: testb $1, %al
; HSW-NEXT: je .LBB7_32
; HSW-NEXT: # %bb.31: # %cond.load57
; HSW-NEXT: vextractf128 $1, %ymm3, %xmm4
; HSW-NEXT: vmovhpd {{.*#+}} xmm4 = xmm4[0],mem[0]
; HSW-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; HSW-NEXT: .LBB7_32: # %else58
; HSW-NEXT: retq
;
; SKX-LABEL: expandload_v16f64_v16i32:
; SKX: # %bb.0:
; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; SKX-NEXT: vptestnmd %ymm3, %ymm3, %k1
; SKX-NEXT: vptestnmd %ymm2, %ymm2, %k2
; SKX-NEXT: kmovb %k2, %eax
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f64_v16i32:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: popcntl %eax, %eax
; KNL-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; KNL-NEXT: retq
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
  ret <16 x double> %res
}

declare <16 x double> @llvm.masked.expandload.v16f64(double*, <16 x i1>, <16 x double>)
declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
declare <4 x double> @llvm.masked.expandload.v4f64(double*, <4 x i1>, <4 x double>)
declare <2 x double> @llvm.masked.expandload.v2f64(double*, <2 x i1>, <2 x double>)
declare <1 x double> @llvm.masked.expandload.v1f64(double*, <1 x i1>, <1 x double>)

declare <32 x float> @llvm.masked.expandload.v32f32(float*, <32 x i1>, <32 x float>)
declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
declare <8 x float> @llvm.masked.expandload.v8f32(float*, <8 x i1>, <8 x float>)
declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
declare <2 x float> @llvm.masked.expandload.v2f32(float*, <2 x i1>, <2 x float>)

declare <8 x i64> @llvm.masked.expandload.v8i64(i64*, <8 x i1>, <8 x i64>)
declare <4 x i64> @llvm.masked.expandload.v4i64(i64*, <4 x i1>, <4 x i64>)
declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
declare <1 x i64> @llvm.masked.expandload.v1i64(i64*, <1 x i1>, <1 x i64>)

declare <16 x i32> @llvm.masked.expandload.v16i32(i32*, <16 x i1>, <16 x i32>)
declare <8 x i32> @llvm.masked.expandload.v8i32(i32*, <8 x i1>, <8 x i32>)
declare <4 x i32> @llvm.masked.expandload.v4i32(i32*, <4 x i1>, <4 x i32>)
declare <2 x i32> @llvm.masked.expandload.v2i32(i32*, <2 x i1>, <2 x i32>)

declare <32 x i16> @llvm.masked.expandload.v32i16(i16*, <32 x i1>, <32 x i16>)
declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>)
declare <8 x i16> @llvm.masked.expandload.v8i16(i16*, <8 x i1>, <8 x i16>)
declare <4 x i16> @llvm.masked.expandload.v4i16(i16*, <4 x i1>, <4 x i16>)

declare <64 x i8> @llvm.masked.expandload.v64i8(i8*, <64 x i1>, <64 x i8>)
declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>)
declare <16 x i8> @llvm.masked.expandload.v16i8(i8*, <16 x i1>, <16 x i8>)
declare <8 x i8> @llvm.masked.expandload.v8i8(i8*, <8 x i1>, <8 x i8>)