| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=haswell | FileCheck %s --check-prefixes=ALL,HSW |
| ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX |
| ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL |
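| |
| ; llvm.masked.compressstore reference semantics: lanes are visited in |
| ; ascending order, and each enabled lane stores one element to the current |
| ; pointer and advances it by the element size, so the enabled elements end |
| ; up densely packed at %base. The non-AVX-512 (HSW) lowerings below |
| ; reproduce exactly this pattern with branchy scalar stores. |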
| |
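| ; With a compile-time mask that disables only lane 11 (imm 0xF7FF), the |
| ; AVX-512 targets load the mask into a k-register and emit a single masked |
| ; vcompressps. HSW has no compress instruction: it stores the eight |
| ; always-enabled low lanes with one vmovups, compresses lanes 8-10 and 12 |
| ; via vpermps (the <0,1,2,4,u,u,u,u> index vector), and scalarizes the |
| ; last three lanes. |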
| define void @compressstore_v16f32_const(float* %base, <16 x float> %V) { |
| ; HSW-LABEL: compressstore_v16f32_const: |
| ; HSW: # %bb.0: # %cond.store |
| ; HSW-NEXT: vmovups %ymm0, (%rdi) |
| ; HSW-NEXT: vmovaps {{.*#+}} ymm0 = <0,1,2,4,u,u,u,u> |
| ; HSW-NEXT: vpermps %ymm1, %ymm0, %ymm0 |
| ; HSW-NEXT: vmovups %xmm0, 32(%rdi) |
| ; HSW-NEXT: vextractf128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vextractps $1, %xmm0, 48(%rdi) |
| ; HSW-NEXT: vextractps $2, %xmm0, 52(%rdi) |
| ; HSW-NEXT: vextractps $3, %xmm0, 56(%rdi) |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v16f32_const: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF |
| ; SKX-NEXT: kmovd %eax, %k1 |
| ; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v16f32_const: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF |
| ; KNL-NEXT: kmovw %eax, %k1 |
| ; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>) |
| ret void |
| } |
| |
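| ; With a variable <8 x i1> mask, HSW fully scalarizes: each lane tests its |
| ; mask bit (the i1 elements live in the even bytes of %xmm1, hence the |
| ; vpextrb $0/$2/.../$14 probes), conditionally stores, and advances %rdi |
| ; by 4. SKX turns the mask into a k-register (vpsllw $15 + vpmovw2m) and |
| ; emits one 256-bit vcompressps; KNL lacks AVX512VL, so it widens the |
| ; whole operation to zmm. |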
| define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v8f32_v8i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovss %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_2: # %else |
| ; HSW-NEXT: vpextrb $2, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vextractps $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_4: # %else2 |
| ; HSW-NEXT: vpextrb $4, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextractps $2, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_6: # %else5 |
| ; HSW-NEXT: vpextrb $6, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextractps $3, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_8: # %else8 |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_10 |
| ; HSW-NEXT: # %bb.9: # %cond.store10 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm2 |
| ; HSW-NEXT: vmovss %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_10: # %else11 |
| ; HSW-NEXT: vpextrb $10, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_12 |
| ; HSW-NEXT: # %bb.11: # %cond.store13 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm2 |
| ; HSW-NEXT: vextractps $1, %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_12: # %else14 |
| ; HSW-NEXT: vpextrb $12, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_14 |
| ; HSW-NEXT: # %bb.13: # %cond.store16 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm2 |
| ; HSW-NEXT: vextractps $2, %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB1_14: # %else17 |
| ; HSW-NEXT: vpextrb $14, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB1_16 |
| ; HSW-NEXT: # %bb.15: # %cond.store19 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vextractps $3, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB1_16: # %else20 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v8f32_v8i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovw2m %xmm1, %k1 |
| ; SKX-NEXT: vcompressps %ymm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v8f32_v8i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 |
| ; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero |
| ; KNL-NEXT: vpsllq $63, %zmm1, %zmm1 |
| ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 |
| ; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v8f32(<8 x float> %V, float* %base, <8 x i1> %mask) |
| ret void |
| } |
| |
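| ; The f64 variant has the same shape: HSW stores the low/high half of each |
| ; 128-bit lane with vmovlpd/vmovhpd (extracting the upper lane of each ymm |
| ; first), while the AVX-512 targets build a k-mask and issue a single |
| ; vcompresspd on zmm. |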
| define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v8f64_v8i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_2: # %else |
| ; HSW-NEXT: vpextrb $2, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_4: # %else2 |
| ; HSW-NEXT: vpextrb $4, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm3 |
| ; HSW-NEXT: vmovlpd %xmm3, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_6: # %else5 |
| ; HSW-NEXT: vpextrb $6, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_8: # %else8 |
| ; HSW-NEXT: vpextrb $8, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_10 |
| ; HSW-NEXT: # %bb.9: # %cond.store10 |
| ; HSW-NEXT: vmovlpd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_10: # %else11 |
| ; HSW-NEXT: vpextrb $10, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_12 |
| ; HSW-NEXT: # %bb.11: # %cond.store13 |
| ; HSW-NEXT: vmovhpd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_12: # %else14 |
| ; HSW-NEXT: vpextrb $12, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_14 |
| ; HSW-NEXT: # %bb.13: # %cond.store16 |
| ; HSW-NEXT: vextractf128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB2_14: # %else17 |
| ; HSW-NEXT: vpextrb $14, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB2_16 |
| ; HSW-NEXT: # %bb.15: # %cond.store19 |
| ; HSW-NEXT: vextractf128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB2_16: # %else20 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v8f64_v8i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovw2m %xmm1, %k1 |
| ; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v8f64_v8i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero |
| ; KNL-NEXT: vpsllq $63, %zmm1, %zmm1 |
| ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 |
| ; KNL-NEXT: vcompresspd %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v8f64(<8 x double> %V, double* %base, <8 x i1> %mask) |
| ret void |
| } |
| |
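| ; An integer element type only changes the opcodes: vmovq/vpextrq for the |
| ; scalar stores on HSW, and vpcompressq instead of vcompresspd on the |
| ; AVX-512 targets. |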
| define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v8i64_v8i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovq %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_2: # %else |
| ; HSW-NEXT: vpextrb $2, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_4: # %else2 |
| ; HSW-NEXT: vpextrb $4, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm3 |
| ; HSW-NEXT: vmovq %xmm3, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_6: # %else5 |
| ; HSW-NEXT: vpextrb $6, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_8: # %else8 |
| ; HSW-NEXT: vpextrb $8, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_10 |
| ; HSW-NEXT: # %bb.9: # %cond.store10 |
| ; HSW-NEXT: vmovq %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_10: # %else11 |
| ; HSW-NEXT: vpextrb $10, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_12 |
| ; HSW-NEXT: # %bb.11: # %cond.store13 |
| ; HSW-NEXT: vpextrq $1, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_12: # %else14 |
| ; HSW-NEXT: vpextrb $12, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_14 |
| ; HSW-NEXT: # %bb.13: # %cond.store16 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vmovq %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB3_14: # %else17 |
| ; HSW-NEXT: vpextrb $14, %xmm2, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB3_16 |
| ; HSW-NEXT: # %bb.15: # %cond.store19 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB3_16: # %else20 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v8i64_v8i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovw2m %xmm1, %k1 |
| ; SKX-NEXT: vpcompressq %zmm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v8i64_v8i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero |
| ; KNL-NEXT: vpsllq $63, %zmm1, %zmm1 |
| ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 |
| ; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v8i64(<8 x i64> %V, i64* %base, <8 x i1> %mask) |
| ret void |
| } |
| |
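| ; Here the <4 x i1> mask arrives in dword elements, so HSW probes bytes |
| ; 0/4/8/12. KNL has no 256-bit compress without AVX512VL, so it widens to |
| ; zmm and clamps the test mask to the low 4 bits with the |
| ; kshiftlw/kshiftrw $12 pair. |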
| define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v4i64_v4i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB4_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovq %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB4_2: # %else |
| ; HSW-NEXT: vpextrb $4, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB4_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB4_4: # %else2 |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB4_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm2 |
| ; HSW-NEXT: vmovq %xmm2, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB4_6: # %else5 |
| ; HSW-NEXT: vpextrb $12, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB4_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB4_8: # %else8 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v4i64_v4i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovd2m %xmm1, %k1 |
| ; SKX-NEXT: vpcompressq %ymm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v4i64_v4i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 |
| ; KNL-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 |
| ; KNL-NEXT: kshiftlw $12, %k0, %k0 |
| ; KNL-NEXT: kshiftrw $12, %k0, %k1 |
| ; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v4i64(<4 x i64> %V, i64* %base, <4 x i1> %mask) |
| ret void |
| } |
| |
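| ; Two qword lanes: the mask elements sit in bytes 0 and 8 on HSW, and KNL |
| ; keeps only the low 2 mask bits via the kshift pair with $14. |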
| define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v2i64_v2i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB5_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovq %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB5_2: # %else |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB5_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vpextrq $1, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB5_4: # %else2 |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v2i64_v2i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpsllq $63, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovq2m %xmm1, %k1 |
| ; SKX-NEXT: vpcompressq %xmm0, (%rdi) {%k1} |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v2i64_v2i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 |
| ; KNL-NEXT: vpsllq $63, %xmm1, %xmm1 |
| ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k0 |
| ; KNL-NEXT: kshiftlw $14, %k0, %k0 |
| ; KNL-NEXT: kshiftrw $14, %k0, %k1 |
| ; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, i64* %base, <2 x i1> %mask) |
| ret void |
| } |
| |
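| ; Four f32 lanes fit in one xmm, so SKX can emit a 128-bit vcompressps |
| ; directly; KNL takes the same zmm-widening detour as the narrow integer |
| ; cases above. |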
| define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v4f32_v4i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB6_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovss %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB6_2: # %else |
| ; HSW-NEXT: vpextrb $4, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB6_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vextractps $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB6_4: # %else2 |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB6_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextractps $2, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB6_6: # %else5 |
| ; HSW-NEXT: vpextrb $12, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB6_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextractps $3, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB6_8: # %else8 |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v4f32_v4i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; SKX-NEXT: vpmovd2m %xmm1, %k1 |
| ; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1} |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v4f32_v4i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 |
| ; KNL-NEXT: vpslld $31, %xmm1, %xmm1 |
| ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 |
| ; KNL-NEXT: kshiftlw $12, %k0, %k0 |
| ; KNL-NEXT: kshiftrw $12, %k0, %k1 |
| ; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask) |
| ret void |
| } |
| |
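| ; The mask is computed from the trigger, and <2 x i32> is legalized as two |
| ; i64 lanes on these targets, so every lowering first zeroes the odd |
| ; dwords with vpblendd before the 64-bit equality test (vpcmpeqq on HSW, |
| ; vptestnmq on the AVX-512 targets). |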
| define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) { |
| ; HSW-LABEL: compressstore_v2f32_v2i32: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; HSW-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; HSW-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1 |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB7_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovss %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB7_2: # %else |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB7_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vextractps $1, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB7_4: # %else2 |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v2f32_v2i32: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1} |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v2f32_v2i32: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 |
| ; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] |
| ; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0 |
| ; KNL-NEXT: kshiftlw $14, %k0, %k0 |
| ; KNL-NEXT: kshiftrw $14, %k0, %k1 |
| ; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: retq |
| %mask = icmp eq <2 x i32> %trigger, zeroinitializer |
| call void @llvm.masked.compressstore.v2f32(<2 x float> %V, float* %base, <2 x i1> %mask) |
| ret void |
| } |
| |
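| ; 32 elements span two zmm registers. The AVX-512 lowerings compress each |
| ; half independently and place the second store at (%rdi,%rax,4), where |
| ; %rax is the popcount of the first half's mask, i.e. the number of 4-byte |
| ; elements the first vcompressps already wrote. HSW expands to 32 guarded |
| ; scalar stores, repeatedly re-packing the dword compare results |
| ; (vpackssdw + vpacksswb) so vpextrb can fish out each mask bit. |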
| define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) { |
| ; HSW-LABEL: compressstore_v32f32_v32i32: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpxor %xmm8, %xmm8, %xmm8 |
| ; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm9 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9 |
| ; HSW-NEXT: vpextrb $0, %xmm9, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_2: # %else |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm8 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8 |
| ; HSW-NEXT: vpextrb $1, %xmm8, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vpextrd $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_4: # %else2 |
| ; HSW-NEXT: vpxor %xmm8, %xmm8, %xmm8 |
| ; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm9 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9 |
| ; HSW-NEXT: vpextrb $2, %xmm9, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vpextrd $2, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_6: # %else5 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm8, %xmm8 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8 |
| ; HSW-NEXT: vpextrb $3, %xmm8, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vpextrd $3, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_8: # %else8 |
| ; HSW-NEXT: vpxor %xmm8, %xmm8, %xmm8 |
| ; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm8 |
| ; HSW-NEXT: vextracti128 $1, %ymm8, %xmm8 |
| ; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm9 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm9, %xmm9 |
| ; HSW-NEXT: vpextrb $4, %xmm9, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_10 |
| ; HSW-NEXT: # %bb.9: # %cond.store10 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm9 |
| ; HSW-NEXT: vmovd %xmm9, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_10: # %else11 |
| ; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm8 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm8, %xmm8 |
| ; HSW-NEXT: vpextrb $5, %xmm8, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_12 |
| ; HSW-NEXT: # %bb.11: # %cond.store13 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm8 |
| ; HSW-NEXT: vpextrd $1, %xmm8, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_12: # %else14 |
| ; HSW-NEXT: vpxor %xmm8, %xmm8, %xmm8 |
| ; HSW-NEXT: vpcmpeqd %ymm8, %ymm4, %ymm4 |
| ; HSW-NEXT: vextracti128 $1, %ymm4, %xmm8 |
| ; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4 |
| ; HSW-NEXT: vpextrb $6, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_14 |
| ; HSW-NEXT: # %bb.13: # %cond.store16 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm4 |
| ; HSW-NEXT: vpextrd $2, %xmm4, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_14: # %else17 |
| ; HSW-NEXT: vpackssdw %xmm8, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm4, %xmm4 |
| ; HSW-NEXT: vpextrb $7, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_16 |
| ; HSW-NEXT: # %bb.15: # %cond.store19 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpextrd $3, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_16: # %else20 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4 |
| ; HSW-NEXT: vpextrb $8, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_18 |
| ; HSW-NEXT: # %bb.17: # %cond.store22 |
| ; HSW-NEXT: vmovd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_18: # %else23 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $9, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_20 |
| ; HSW-NEXT: # %bb.19: # %cond.store25 |
| ; HSW-NEXT: vpextrd $1, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_20: # %else26 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4 |
| ; HSW-NEXT: vpextrb $10, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_22 |
| ; HSW-NEXT: # %bb.21: # %cond.store28 |
| ; HSW-NEXT: vpextrd $2, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_22: # %else29 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $11, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_24 |
| ; HSW-NEXT: # %bb.23: # %cond.store31 |
| ; HSW-NEXT: vpextrd $3, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_24: # %else32 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4 |
| ; HSW-NEXT: vpextrb $12, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_26 |
| ; HSW-NEXT: # %bb.25: # %cond.store34 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm4 |
| ; HSW-NEXT: vmovd %xmm4, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_26: # %else35 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $13, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_28 |
| ; HSW-NEXT: # %bb.27: # %cond.store37 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vpextrd $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_28: # %else38 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm4 |
| ; HSW-NEXT: vpacksswb %xmm4, %xmm0, %xmm4 |
| ; HSW-NEXT: vpextrb $14, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_30 |
| ; HSW-NEXT: # %bb.29: # %cond.store40 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm4 |
| ; HSW-NEXT: vpextrd $2, %xmm4, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_30: # %else41 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $15, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_32 |
| ; HSW-NEXT: # %bb.31: # %cond.store43 |
| ; HSW-NEXT: vextracti128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vpextrd $3, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_32: # %else44 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm6, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm1, %xmm1 |
| ; HSW-NEXT: vpextrb $0, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_34 |
| ; HSW-NEXT: # %bb.33: # %cond.store46 |
| ; HSW-NEXT: vmovd %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_34: # %else47 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $1, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_36 |
| ; HSW-NEXT: # %bb.35: # %cond.store49 |
| ; HSW-NEXT: vpextrd $1, %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_36: # %else50 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm6, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm1, %xmm1 |
| ; HSW-NEXT: vpextrb $2, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_38 |
| ; HSW-NEXT: # %bb.37: # %cond.store52 |
| ; HSW-NEXT: vpextrd $2, %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_38: # %else53 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $3, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_40 |
| ; HSW-NEXT: # %bb.39: # %cond.store55 |
| ; HSW-NEXT: vpextrd $3, %xmm2, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_40: # %else56 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm6, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm1, %xmm1 |
| ; HSW-NEXT: vpextrb $4, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_42 |
| ; HSW-NEXT: # %bb.41: # %cond.store58 |
| ; HSW-NEXT: vextracti128 $1, %ymm2, %xmm1 |
| ; HSW-NEXT: vmovd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_42: # %else59 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $5, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_44 |
| ; HSW-NEXT: # %bb.43: # %cond.store61 |
| ; HSW-NEXT: vextracti128 $1, %ymm2, %xmm0 |
| ; HSW-NEXT: vpextrd $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_44: # %else62 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm6, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm1, %xmm1 |
| ; HSW-NEXT: vpextrb $6, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_46 |
| ; HSW-NEXT: # %bb.45: # %cond.store64 |
| ; HSW-NEXT: vextracti128 $1, %ymm2, %xmm1 |
| ; HSW-NEXT: vpextrd $2, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_46: # %else65 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $7, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_48 |
| ; HSW-NEXT: # %bb.47: # %cond.store67 |
| ; HSW-NEXT: vextracti128 $1, %ymm2, %xmm0 |
| ; HSW-NEXT: vpextrd $3, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_48: # %else68 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm7, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; HSW-NEXT: vpextrb $8, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_50 |
| ; HSW-NEXT: # %bb.49: # %cond.store70 |
| ; HSW-NEXT: vmovd %xmm3, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_50: # %else71 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $9, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_52 |
| ; HSW-NEXT: # %bb.51: # %cond.store73 |
| ; HSW-NEXT: vpextrd $1, %xmm3, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_52: # %else74 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm7, %ymm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; HSW-NEXT: vpextrb $10, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_54 |
| ; HSW-NEXT: # %bb.53: # %cond.store76 |
| ; HSW-NEXT: vpextrd $2, %xmm3, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_54: # %else77 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $11, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_56 |
| ; HSW-NEXT: # %bb.55: # %cond.store79 |
| ; HSW-NEXT: vpextrd $3, %xmm3, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_56: # %else80 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm7, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; HSW-NEXT: vpextrb $12, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_58 |
| ; HSW-NEXT: # %bb.57: # %cond.store82 |
| ; HSW-NEXT: vextracti128 $1, %ymm3, %xmm1 |
| ; HSW-NEXT: vmovd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_58: # %else83 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $13, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_60 |
| ; HSW-NEXT: # %bb.59: # %cond.store85 |
| ; HSW-NEXT: vextracti128 $1, %ymm3, %xmm0 |
| ; HSW-NEXT: vpextrd $1, %xmm0, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_60: # %else86 |
| ; HSW-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpcmpeqd %ymm0, %ymm7, %ymm0 |
| ; HSW-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm1 |
| ; HSW-NEXT: vpacksswb %xmm1, %xmm0, %xmm1 |
| ; HSW-NEXT: vpextrb $14, %xmm1, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_62 |
| ; HSW-NEXT: # %bb.61: # %cond.store88 |
| ; HSW-NEXT: vextracti128 $1, %ymm3, %xmm1 |
| ; HSW-NEXT: vpextrd $2, %xmm1, (%rdi) |
| ; HSW-NEXT: addq $4, %rdi |
| ; HSW-NEXT: .LBB8_62: # %else89 |
| ; HSW-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 |
| ; HSW-NEXT: vpextrb $15, %xmm0, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB8_64 |
| ; HSW-NEXT: # %bb.63: # %cond.store91 |
| ; HSW-NEXT: vextracti128 $1, %ymm3, %xmm0 |
| ; HSW-NEXT: vpextrd $3, %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB8_64: # %else92 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v32f32_v32i32: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vptestnmd %zmm3, %zmm3, %k1 |
| ; SKX-NEXT: vptestnmd %zmm2, %zmm2, %k2 |
| ; SKX-NEXT: kmovw %k2, %eax |
| ; SKX-NEXT: popcntl %eax, %eax |
| ; SKX-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1} |
| ; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k2} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v32f32_v32i32: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1 |
| ; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2 |
| ; KNL-NEXT: kmovw %k2, %eax |
| ; KNL-NEXT: popcntl %eax, %eax |
| ; KNL-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1} |
| ; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k2} |
| ; KNL-NEXT: retq |
| %mask = icmp eq <32 x i32> %trigger, zeroinitializer |
| call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask) |
| ret void |
| } |
| |
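| ; 16 doubles likewise need two zmm stores. SKX and KNL split the 16-bit |
| ; k-mask with kshiftrw $8 and offset the high-half vcompresspd by the |
| ; popcount of the low 8 mask bits, scaled by 8 bytes; KNL zero-extends the |
| ; byte mask through vpmovzxbd/vpslld/vptestmd and trims the popcount input |
| ; with movzbl because plain AVX512F only has kmovw. HSW probes all 16 mask |
| ; bytes of %xmm4 directly. |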
| define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) { |
| ; HSW-LABEL: compressstore_v16f64_v16i1: |
| ; HSW: # %bb.0: |
| ; HSW-NEXT: vpextrb $0, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_2 |
| ; HSW-NEXT: # %bb.1: # %cond.store |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_2: # %else |
| ; HSW-NEXT: vpextrb $1, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_4 |
| ; HSW-NEXT: # %bb.3: # %cond.store1 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_4: # %else2 |
| ; HSW-NEXT: vpextrb $2, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_6 |
| ; HSW-NEXT: # %bb.5: # %cond.store4 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm5 |
| ; HSW-NEXT: vmovlpd %xmm5, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_6: # %else5 |
| ; HSW-NEXT: vpextrb $3, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_8 |
| ; HSW-NEXT: # %bb.7: # %cond.store7 |
| ; HSW-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_8: # %else8 |
| ; HSW-NEXT: vpextrb $4, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_10 |
| ; HSW-NEXT: # %bb.9: # %cond.store10 |
| ; HSW-NEXT: vmovlpd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_10: # %else11 |
| ; HSW-NEXT: vpextrb $5, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_12 |
| ; HSW-NEXT: # %bb.11: # %cond.store13 |
| ; HSW-NEXT: vmovhpd %xmm1, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_12: # %else14 |
| ; HSW-NEXT: vpextrb $6, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_14 |
| ; HSW-NEXT: # %bb.13: # %cond.store16 |
| ; HSW-NEXT: vextractf128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_14: # %else17 |
| ; HSW-NEXT: vpextrb $7, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_16 |
| ; HSW-NEXT: # %bb.15: # %cond.store19 |
| ; HSW-NEXT: vextractf128 $1, %ymm1, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_16: # %else20 |
| ; HSW-NEXT: vpextrb $8, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_18 |
| ; HSW-NEXT: # %bb.17: # %cond.store22 |
| ; HSW-NEXT: vmovlpd %xmm2, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_18: # %else23 |
| ; HSW-NEXT: vpextrb $9, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_20 |
| ; HSW-NEXT: # %bb.19: # %cond.store25 |
| ; HSW-NEXT: vmovhpd %xmm2, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_20: # %else26 |
| ; HSW-NEXT: vpextrb $10, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_22 |
| ; HSW-NEXT: # %bb.21: # %cond.store28 |
| ; HSW-NEXT: vextractf128 $1, %ymm2, %xmm0 |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_22: # %else29 |
| ; HSW-NEXT: vpextrb $11, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_24 |
| ; HSW-NEXT: # %bb.23: # %cond.store31 |
| ; HSW-NEXT: vextractf128 $1, %ymm2, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_24: # %else32 |
| ; HSW-NEXT: vpextrb $12, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_26 |
| ; HSW-NEXT: # %bb.25: # %cond.store34 |
| ; HSW-NEXT: vmovlpd %xmm3, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_26: # %else35 |
| ; HSW-NEXT: vpextrb $13, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_28 |
| ; HSW-NEXT: # %bb.27: # %cond.store37 |
| ; HSW-NEXT: vmovhpd %xmm3, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_28: # %else38 |
| ; HSW-NEXT: vpextrb $14, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_30 |
| ; HSW-NEXT: # %bb.29: # %cond.store40 |
| ; HSW-NEXT: vextractf128 $1, %ymm3, %xmm0 |
| ; HSW-NEXT: vmovlpd %xmm0, (%rdi) |
| ; HSW-NEXT: addq $8, %rdi |
| ; HSW-NEXT: .LBB9_30: # %else41 |
| ; HSW-NEXT: vpextrb $15, %xmm4, %eax |
| ; HSW-NEXT: testb $1, %al |
| ; HSW-NEXT: je .LBB9_32 |
| ; HSW-NEXT: # %bb.31: # %cond.store43 |
| ; HSW-NEXT: vextractf128 $1, %ymm3, %xmm0 |
| ; HSW-NEXT: vmovhpd %xmm0, (%rdi) |
| ; HSW-NEXT: .LBB9_32: # %else44 |
| ; HSW-NEXT: vzeroupper |
| ; HSW-NEXT: retq |
| ; |
| ; SKX-LABEL: compressstore_v16f64_v16i1: |
| ; SKX: # %bb.0: |
| ; SKX-NEXT: vpsllw $7, %xmm2, %xmm2 |
| ; SKX-NEXT: vpmovb2m %xmm2, %k1 |
| ; SKX-NEXT: kshiftrw $8, %k1, %k2 |
| ; SKX-NEXT: kmovb %k1, %eax |
| ; SKX-NEXT: popcntl %eax, %eax |
| ; SKX-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2} |
| ; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1} |
| ; SKX-NEXT: vzeroupper |
| ; SKX-NEXT: retq |
| ; |
| ; KNL-LABEL: compressstore_v16f64_v16i1: |
| ; KNL: # %bb.0: |
| ; KNL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero |
| ; KNL-NEXT: vpslld $31, %zmm2, %zmm2 |
| ; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1 |
| ; KNL-NEXT: kshiftrw $8, %k1, %k2 |
| ; KNL-NEXT: vcompresspd %zmm0, (%rdi) {%k1} |
| ; KNL-NEXT: kmovw %k1, %eax |
| ; KNL-NEXT: movzbl %al, %eax |
| ; KNL-NEXT: popcntl %eax, %eax |
| ; KNL-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2} |
| ; KNL-NEXT: retq |
| call void @llvm.masked.compressstore.v16f64(<16 x double> %V, double* %base, <16 x i1> %mask) |
| ret void |
| } |
| |
| declare void @llvm.masked.compressstore.v16f64(<16 x double>, double*, <16 x i1>) |
| declare void @llvm.masked.compressstore.v8f64(<8 x double>, double*, <8 x i1>) |
| declare void @llvm.masked.compressstore.v4f64(<4 x double>, double*, <4 x i1>) |
| declare void @llvm.masked.compressstore.v2f64(<2 x double>, double*, <2 x i1>) |
| declare void @llvm.masked.compressstore.v1f64(<1 x double>, double*, <1 x i1>) |
| |
| declare void @llvm.masked.compressstore.v32f32(<32 x float>, float*, <32 x i1>) |
| declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>) |
| declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>) |
| declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>) |
| declare void @llvm.masked.compressstore.v2f32(<2 x float>, float*, <2 x i1>) |
| |
| declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64*, <8 x i1>) |
| declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64*, <4 x i1>) |
| declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64*, <2 x i1>) |
| declare void @llvm.masked.compressstore.v1i64(<1 x i64>, i64*, <1 x i1>) |
| |
| declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32*, <16 x i1>) |
| declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>) |
| declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>) |
| declare void @llvm.masked.compressstore.v2i32(<2 x i32>, i32*, <2 x i1>) |
| |
| declare void @llvm.masked.compressstore.v32i16(<32 x i16>, i16*, <32 x i1>) |
| declare void @llvm.masked.compressstore.v16i16(<16 x i16>, i16*, <16 x i1>) |
| declare void @llvm.masked.compressstore.v8i16(<8 x i16>, i16*, <8 x i1>) |
| declare void @llvm.masked.compressstore.v4i16(<4 x i16>, i16*, <4 x i1>) |
| |
| declare void @llvm.masked.compressstore.v64i8(<64 x i8>, i8*, <64 x i1>) |
| declare void @llvm.masked.compressstore.v32i8(<32 x i8>, i8*, <32 x i1>) |
| declare void @llvm.masked.compressstore.v16i8(<16 x i8>, i8*, <16 x i1>) |
| declare void @llvm.masked.compressstore.v8i8(<8 x i8>, i8*, <8 x i1>) |