| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfh | FileCheck %s --check-prefixes=CHECK,RV32 |
| ; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfh | FileCheck %s --check-prefixes=CHECK,RV64 |
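; This file checks lowering of the fixed-length llvm.vector.deinterleave{2,3,5,7}
; intrinsics for RV32 and RV64 with V and Zvfh.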
| |
| ; Integers |
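; For element types narrower than ELEN, deinterleave2 should lower to a pair
; of vnsrl narrowing shifts at half the source LMUL: a shift of 0 extracts the
; even elements and a shift by the element width extracts the odd ones. i1
; vectors are first widened to i8 with vmerge, then narrowed back to masks
; with vmsne after the shifts.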
| |
| define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v16i1_v32i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.i v9, 0 |
| ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v8, v0, 2 |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 |
| ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v9, v10, 0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v10, v10, 8 |
| ; CHECK-NEXT: vnsrl.wi v8, v8, 8 |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vi v9, v11, 8 |
| ; CHECK-NEXT: vslideup.vi v10, v8, 8 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: vmsne.vi v8, v10, 0 |
| ; CHECK-NEXT: ret |
| %retval = call {<16 x i1>, <16 x i1>} @llvm.vector.deinterleave2.v32i1(<32 x i1> %vec) |
| ret {<16 x i1>, <16 x i1>} %retval |
| } |
| |
| define {<16 x i8>, <16 x i8>} @vector_deinterleave_v16i8_v32i8(<32 x i8> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v16i8_v32i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v10, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 8 |
| ; CHECK-NEXT: vmv.v.v v8, v10 |
| ; CHECK-NEXT: vmv.v.v v9, v11 |
| ; CHECK-NEXT: ret |
| %retval = call {<16 x i8>, <16 x i8>} @llvm.vector.deinterleave2.v32i8(<32 x i8> %vec) |
| ret {<16 x i8>, <16 x i8>} %retval |
| } |
| |
| define {<8 x i16>, <8 x i16>} @vector_deinterleave_v8i16_v16i16(<16 x i16> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v8i16_v16i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v10, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 16 |
| ; CHECK-NEXT: vmv.v.v v8, v10 |
| ; CHECK-NEXT: vmv.v.v v9, v11 |
| ; CHECK-NEXT: ret |
| %retval = call {<8 x i16>, <8 x i16>} @llvm.vector.deinterleave2.v16i16(<16 x i16> %vec) |
| ret {<8 x i16>, <8 x i16>} %retval |
| } |
| |
define {<4 x i32>, <4 x i32>} @vector_deinterleave_v4i32_v8i32(<8 x i32> %vec) {
; CHECK-LABEL: vector_deinterleave_v4i32_v8i32:
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 32 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wx v10, v8, a0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 0 |
| ; CHECK-NEXT: vmv.v.v v8, v11 |
| ; CHECK-NEXT: vmv.v.v v9, v10 |
| ; CHECK-NEXT: ret |
| %retval = call {<4 x i32>, <4 x i32>} @llvm.vector.deinterleave2.v8i32(<8 x i32> %vec) |
| ret {<4 x i32>, <4 x i32>} %retval |
| } |
| |
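; i64 elements have no wider type to narrow from, so vnsrl is not available
; here; the two-result i64 cases are instead assembled from slideup/slidedown
; (including masked slides and vmerge) to separate even and odd elements.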
| define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v2i64_v4i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vmv.v.i v0, 1 |
| ; CHECK-NEXT: vmv1r.v v9, v10 |
| ; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t |
| ; CHECK-NEXT: vslideup.vi v8, v10, 1 |
| ; CHECK-NEXT: ret |
| %retval = call {<2 x i64>, <2 x i64>} @llvm.vector.deinterleave2.v4i64(<4 x i64> %vec) |
| ret {<2 x i64>, <2 x i64>} %retval |
| } |
| |
| define {<4 x i64>, <4 x i64>} @vector_deinterleave_v4i64_v8i64(<8 x i64> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v4i64_v8i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v0, 8 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v16, v8, 4 |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 2 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv.v.i v11, 12 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vslideup.vi v14, v16, 2 |
| ; CHECK-NEXT: vslideup.vi v14, v16, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: vmerge.vvm v12, v12, v14, v0 |
| ; CHECK-NEXT: vslidedown.vi v18, v8, 1 |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v14, 4 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vslidedown.vi v18, v8, 2, v0.t |
| ; CHECK-NEXT: vmv2r.v v8, v16 |
| ; CHECK-NEXT: vmv1r.v v0, v14 |
| ; CHECK-NEXT: vslideup.vi v8, v16, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: vmerge.vvm v10, v18, v8, v0 |
| ; CHECK-NEXT: vmv2r.v v8, v12 |
| ; CHECK-NEXT: ret |
| %retval = call {<4 x i64>, <4 x i64>} @llvm.vector.deinterleave2.v8i64(<8 x i64> %vec) |
| ret {<4 x i64>, <4 x i64>} %retval |
| } |
| |
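; At this width the even/odd elements are selected with vcompress: mask 85
; (0b01010101) picks the even lanes and 170 (0b10101010) the odd lanes, and
; masked vrgatherei16 gathers the remaining elements from the slid-down upper
; half of the source.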
| define {<8 x i64>, <8 x i64>} @vector_deinterleave_v8i64_v16i64(<16 x i64> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v8i64_v16i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 85 |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.i v0, -16 |
| ; CHECK-NEXT: vid.v v16 |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v24, v8, 8 |
| ; CHECK-NEXT: vmv.s.x v12, a0 |
| ; CHECK-NEXT: li a0, 170 |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; CHECK-NEXT: vadd.vv v20, v16, v16 |
| ; CHECK-NEXT: vmv.s.x v21, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vcompress.vm v16, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma |
| ; CHECK-NEXT: vadd.vi v22, v20, -8 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vcompress.vm v12, v8, v21 |
| ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma |
| ; CHECK-NEXT: vadd.vi v8, v20, -7 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu |
| ; CHECK-NEXT: vrgatherei16.vv v16, v24, v22, v0.t |
| ; CHECK-NEXT: vrgatherei16.vv v12, v24, v8, v0.t |
| ; CHECK-NEXT: vmv.v.v v8, v16 |
| ; CHECK-NEXT: ret |
| %retval = call {<8 x i64>, <8 x i64>} @llvm.vector.deinterleave2.v16i64(<16 x i64> %vec) |
| ret {<8 x i64>, <8 x i64>} %retval |
| } |
| |
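; Deinterleave factors other than 2 are lowered through memory: the source
; parts are slid together into one contiguous register group, stored to a
; stack slot, and reloaded with a unit-stride segmented load (vlseg<N>e<SEW>).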
| define {<2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v6i32(<6 x i32> %v) { |
| ; CHECK-LABEL: vector_deinterleave3_v2i32_v6i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 4 |
| ; CHECK-NEXT: srli a0, a0, 3 |
| ; CHECK-NEXT: add a1, a0, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v10, a0 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vmv1r.v v9, v12 |
| ; CHECK-NEXT: vs2r.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma |
| ; CHECK-NEXT: vlseg3e32.v v8, (a0) |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %res = call {<2 x i32>, <2 x i32>, <2 x i32>} @llvm.vector.deinterleave3.v6i32(<6 x i32> %v) |
| ret {<2 x i32>, <2 x i32>, <2 x i32>} %res |
| } |
| |
| |
| define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterleave5_v2i16_v10i16(<10 x i16> %v) { |
| ; CHECK-LABEL: vector_deinterleave5_v2i16_v10i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 6 |
| ; CHECK-NEXT: vslidedown.vi v11, v8, 4 |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v14, v8, 8 |
| ; CHECK-NEXT: srli a1, a0, 3 |
| ; CHECK-NEXT: srli a0, a0, 2 |
| ; CHECK-NEXT: add a2, a1, a1 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v11, v10, a1 |
| ; CHECK-NEXT: vslideup.vx v8, v12, a1 |
| ; CHECK-NEXT: add a1, a0, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v11, a0 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vmv1r.v v9, v14 |
| ; CHECK-NEXT: vs2r.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma |
| ; CHECK-NEXT: vlseg5e16.v v8, (a0) |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %res = call {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @llvm.vector.deinterleave5.v10i16(<10 x i16> %v) |
| ret {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} %res |
| } |
| |
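; The checks below split into RV32/RV64 prefixes: the vlenb scaling goes
; through a multiply libcall (__mulsi3 on RV32, __muldi3 on RV64) and the
; saved registers use word or doubleword spill slots accordingly.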
| define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @vector_deinterleave7_v14i8_v2i8(<14 x i8> %v) { |
| ; RV32-LABEL: vector_deinterleave7_v14i8_v2i8: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: addi sp, sp, -48 |
| ; RV32-NEXT: .cfi_def_cfa_offset 48 |
| ; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: .cfi_offset s0, -8 |
| ; RV32-NEXT: .cfi_offset s1, -12 |
| ; RV32-NEXT: csrr a0, vlenb |
| ; RV32-NEXT: slli a0, a0, 2 |
| ; RV32-NEXT: sub sp, sp, a0 |
| ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb |
| ; RV32-NEXT: addi a0, sp, 32 |
| ; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill |
| ; RV32-NEXT: csrr s1, vlenb |
| ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV32-NEXT: vslidedown.vi v11, v8, 10 |
| ; RV32-NEXT: vslidedown.vi v10, v8, 8 |
| ; RV32-NEXT: vslidedown.vi v9, v8, 2 |
| ; RV32-NEXT: srli s0, s1, 3 |
| ; RV32-NEXT: add a0, s0, s0 |
| ; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, ma |
| ; RV32-NEXT: vslideup.vx v10, v11, s0 |
| ; RV32-NEXT: vmv1r.v v11, v8 |
| ; RV32-NEXT: vslideup.vx v11, v9, s0 |
| ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV32-NEXT: vslidedown.vi v9, v8, 12 |
| ; RV32-NEXT: srli a0, s1, 2 |
| ; RV32-NEXT: add a1, a0, s0 |
| ; RV32-NEXT: vsetvli zero, a1, e8, mf2, tu, ma |
| ; RV32-NEXT: vslideup.vx v10, v9, a0 |
| ; RV32-NEXT: csrr a2, vlenb |
| ; RV32-NEXT: slli a2, a2, 1 |
| ; RV32-NEXT: add a2, sp, a2 |
| ; RV32-NEXT: addi a2, a2, 32 |
| ; RV32-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV32-NEXT: vslidedown.vi v9, v8, 4 |
| ; RV32-NEXT: vsetvli zero, a1, e8, mf2, tu, ma |
| ; RV32-NEXT: vslideup.vx v11, v9, a0 |
| ; RV32-NEXT: csrr a0, vlenb |
| ; RV32-NEXT: add a0, sp, a0 |
| ; RV32-NEXT: addi a0, a0, 32 |
| ; RV32-NEXT: vs1r.v v11, (a0) # Unknown-size Folded Spill |
| ; RV32-NEXT: li a1, 3 |
| ; RV32-NEXT: mv a0, s0 |
| ; RV32-NEXT: call __mulsi3 |
| ; RV32-NEXT: add s0, a0, s0 |
| ; RV32-NEXT: addi a1, sp, 32 |
| ; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload |
| ; RV32-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV32-NEXT: vslidedown.vi v8, v8, 6 |
| ; RV32-NEXT: srli s1, s1, 1 |
| ; RV32-NEXT: csrr a1, vlenb |
| ; RV32-NEXT: add a1, sp, a1 |
| ; RV32-NEXT: addi a1, a1, 32 |
| ; RV32-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload |
| ; RV32-NEXT: vsetvli zero, s0, e8, mf2, ta, ma |
| ; RV32-NEXT: vslideup.vx v9, v8, a0 |
| ; RV32-NEXT: add a0, s1, s1 |
| ; RV32-NEXT: csrr a1, vlenb |
| ; RV32-NEXT: slli a1, a1, 1 |
| ; RV32-NEXT: add a1, sp, a1 |
| ; RV32-NEXT: addi a1, a1, 32 |
| ; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload |
| ; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; RV32-NEXT: vslideup.vx v9, v8, s1 |
| ; RV32-NEXT: csrr a0, vlenb |
| ; RV32-NEXT: slli a1, a0, 1 |
| ; RV32-NEXT: add a0, a1, a0 |
| ; RV32-NEXT: add a0, sp, a0 |
| ; RV32-NEXT: addi a0, a0, 32 |
| ; RV32-NEXT: vs1r.v v9, (a0) |
| ; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma |
| ; RV32-NEXT: vlseg7e8.v v8, (a0) |
| ; RV32-NEXT: csrr a0, vlenb |
| ; RV32-NEXT: slli a0, a0, 2 |
| ; RV32-NEXT: add sp, sp, a0 |
| ; RV32-NEXT: .cfi_def_cfa sp, 48 |
| ; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: .cfi_restore ra |
| ; RV32-NEXT: .cfi_restore s0 |
| ; RV32-NEXT: .cfi_restore s1 |
| ; RV32-NEXT: addi sp, sp, 48 |
| ; RV32-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vector_deinterleave7_v14i8_v2i8: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: addi sp, sp, -64 |
| ; RV64-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: .cfi_offset s0, -16 |
| ; RV64-NEXT: .cfi_offset s1, -24 |
| ; RV64-NEXT: csrr a0, vlenb |
| ; RV64-NEXT: slli a0, a0, 2 |
| ; RV64-NEXT: sub sp, sp, a0 |
| ; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb |
| ; RV64-NEXT: addi a0, sp, 32 |
| ; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill |
| ; RV64-NEXT: csrr s1, vlenb |
| ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV64-NEXT: vslidedown.vi v11, v8, 10 |
| ; RV64-NEXT: vslidedown.vi v10, v8, 8 |
| ; RV64-NEXT: vslidedown.vi v9, v8, 2 |
| ; RV64-NEXT: srli s0, s1, 3 |
| ; RV64-NEXT: add a0, s0, s0 |
| ; RV64-NEXT: vsetvli zero, a0, e8, mf2, tu, ma |
| ; RV64-NEXT: vslideup.vx v10, v11, s0 |
| ; RV64-NEXT: vmv1r.v v11, v8 |
| ; RV64-NEXT: vslideup.vx v11, v9, s0 |
| ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV64-NEXT: vslidedown.vi v9, v8, 12 |
| ; RV64-NEXT: srli a0, s1, 2 |
| ; RV64-NEXT: add a1, a0, s0 |
| ; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma |
| ; RV64-NEXT: vslideup.vx v10, v9, a0 |
| ; RV64-NEXT: csrr a2, vlenb |
| ; RV64-NEXT: slli a2, a2, 1 |
| ; RV64-NEXT: add a2, sp, a2 |
| ; RV64-NEXT: addi a2, a2, 32 |
| ; RV64-NEXT: vs1r.v v10, (a2) # Unknown-size Folded Spill |
| ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV64-NEXT: vslidedown.vi v9, v8, 4 |
| ; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma |
| ; RV64-NEXT: vslideup.vx v11, v9, a0 |
| ; RV64-NEXT: csrr a0, vlenb |
| ; RV64-NEXT: add a0, sp, a0 |
| ; RV64-NEXT: addi a0, a0, 32 |
| ; RV64-NEXT: vs1r.v v11, (a0) # Unknown-size Folded Spill |
| ; RV64-NEXT: li a1, 3 |
| ; RV64-NEXT: mv a0, s0 |
| ; RV64-NEXT: call __muldi3 |
| ; RV64-NEXT: add s0, a0, s0 |
| ; RV64-NEXT: addi a1, sp, 32 |
| ; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload |
| ; RV64-NEXT: vsetivli zero, 2, e8, m1, ta, ma |
| ; RV64-NEXT: vslidedown.vi v8, v8, 6 |
| ; RV64-NEXT: srli s1, s1, 1 |
| ; RV64-NEXT: csrr a1, vlenb |
| ; RV64-NEXT: add a1, sp, a1 |
| ; RV64-NEXT: addi a1, a1, 32 |
| ; RV64-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload |
| ; RV64-NEXT: vsetvli zero, s0, e8, mf2, ta, ma |
| ; RV64-NEXT: vslideup.vx v9, v8, a0 |
| ; RV64-NEXT: add a0, s1, s1 |
| ; RV64-NEXT: csrr a1, vlenb |
| ; RV64-NEXT: slli a1, a1, 1 |
| ; RV64-NEXT: add a1, sp, a1 |
| ; RV64-NEXT: addi a1, a1, 32 |
| ; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload |
| ; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; RV64-NEXT: vslideup.vx v9, v8, s1 |
| ; RV64-NEXT: csrr a0, vlenb |
| ; RV64-NEXT: slli a1, a0, 1 |
| ; RV64-NEXT: add a0, a1, a0 |
| ; RV64-NEXT: add a0, sp, a0 |
| ; RV64-NEXT: addi a0, a0, 32 |
| ; RV64-NEXT: vs1r.v v9, (a0) |
| ; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma |
| ; RV64-NEXT: vlseg7e8.v v8, (a0) |
| ; RV64-NEXT: csrr a0, vlenb |
| ; RV64-NEXT: slli a0, a0, 2 |
| ; RV64-NEXT: add sp, sp, a0 |
| ; RV64-NEXT: .cfi_def_cfa sp, 64 |
| ; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: .cfi_restore ra |
| ; RV64-NEXT: .cfi_restore s0 |
| ; RV64-NEXT: .cfi_restore s1 |
| ; RV64-NEXT: addi sp, sp, 64 |
| ; RV64-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64-NEXT: ret |
| %res = call {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @llvm.vector.deinterleave7.v14i8(<14 x i8> %v) |
| ret {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} %res |
| } |
| |
| |
| ; Floats |
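; Deinterleaving only moves bits, so the floating-point cases should lower
; the same way as the integer cases of matching element width: vnsrl pairs
; where a wider SEW exists, slides and merges for f64, and stack round-trips
; through vlseg for factors other than 2.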
| |
| define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v2f16_v4f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v10, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v9, v8, 16 |
| ; CHECK-NEXT: vmv1r.v v8, v10 |
| ; CHECK-NEXT: ret |
| %retval = call {<2 x half>, <2 x half>} @llvm.vector.deinterleave2.v4f16(<4 x half> %vec) |
| ret {<2 x half>, <2 x half>} %retval |
| } |
| |
| define {<4 x half>, <4 x half>} @vector_deinterleave_v4f16_v8f16(<8 x half> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v4f16_v8f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v10, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v9, v8, 16 |
| ; CHECK-NEXT: vmv1r.v v8, v10 |
| ; CHECK-NEXT: ret |
| %retval = call {<4 x half>, <4 x half>} @llvm.vector.deinterleave2.v8f16(<8 x half> %vec) |
| ret {<4 x half>, <4 x half>} %retval |
| } |
| |
| define {<2 x float>, <2 x float>} @vector_deinterleave_v2f32_v4f32(<4 x float> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v2f32_v4f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 32 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma |
| ; CHECK-NEXT: vnsrl.wx v9, v8, a0 |
| ; CHECK-NEXT: vnsrl.wi v8, v8, 0 |
| ; CHECK-NEXT: ret |
| %retval = call {<2 x float>, <2 x float>} @llvm.vector.deinterleave2.v4f32(<4 x float> %vec) |
| ret {<2 x float>, <2 x float>} %retval |
| } |
| |
| define {<8 x half>, <8 x half>} @vector_deinterleave_v8f16_v16f16(<16 x half> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v8f16_v16f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v10, v8, 0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 16 |
| ; CHECK-NEXT: vmv.v.v v8, v10 |
| ; CHECK-NEXT: vmv.v.v v9, v11 |
| ; CHECK-NEXT: ret |
| %retval = call {<8 x half>, <8 x half>} @llvm.vector.deinterleave2.v16f16(<16 x half> %vec) |
| ret {<8 x half>, <8 x half>} %retval |
| } |
| |
| define {<4 x float>, <4 x float>} @vector_deinterleave_v4f32_v8f32(<8 x float> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v4f32_v8f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 32 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wx v10, v8, a0 |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 0 |
| ; CHECK-NEXT: vmv.v.v v8, v11 |
| ; CHECK-NEXT: vmv.v.v v9, v10 |
| ; CHECK-NEXT: ret |
| %retval = call {<4 x float>, <4 x float>} @llvm.vector.deinterleave2.v8f32(<8 x float> %vec) |
| ret {<4 x float>, <4 x float>} %retval |
| } |
| |
| define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v2f64_v4f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vmv.v.i v0, 1 |
| ; CHECK-NEXT: vmv1r.v v9, v10 |
| ; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t |
| ; CHECK-NEXT: vslideup.vi v8, v10, 1 |
| ; CHECK-NEXT: ret |
| %retval = call {<2 x double>, <2 x double>} @llvm.vector.deinterleave2.v4f64(<4 x double> %vec) |
| ret {<2 x double>, <2 x double>} %retval |
| } |
| |
| define {<4 x double>, <4 x double>} @vector_deinterleave_v4f64_v8f64(<8 x double> %vec) { |
| ; CHECK-LABEL: vector_deinterleave_v4f64_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v0, 8 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v16, v8, 4 |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 2 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv.v.i v11, 12 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vslideup.vi v14, v16, 2 |
| ; CHECK-NEXT: vslideup.vi v14, v16, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: vmerge.vvm v12, v12, v14, v0 |
| ; CHECK-NEXT: vslidedown.vi v18, v8, 1 |
| ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v14, 4 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vslidedown.vi v18, v8, 2, v0.t |
| ; CHECK-NEXT: vmv2r.v v8, v16 |
| ; CHECK-NEXT: vmv1r.v v0, v14 |
| ; CHECK-NEXT: vslideup.vi v8, v16, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: vmerge.vvm v10, v18, v8, v0 |
| ; CHECK-NEXT: vmv2r.v v8, v12 |
| ; CHECK-NEXT: ret |
| %retval = call {<4 x double>, <4 x double>} @llvm.vector.deinterleave2.v8f64(<8 x double> %vec) |
| ret {<4 x double>, <4 x double>} %retval |
| } |
| |
define {<2 x float>, <2 x float>, <2 x float>} @vector_deinterleave3_v6f32_v2f32(<6 x float> %v) {
; CHECK-LABEL: vector_deinterleave3_v6f32_v2f32:
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 4 |
| ; CHECK-NEXT: srli a0, a0, 3 |
| ; CHECK-NEXT: add a1, a0, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v10, a0 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vmv1r.v v9, v12 |
| ; CHECK-NEXT: vs2r.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma |
| ; CHECK-NEXT: vlseg3e32.v v8, (a0) |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %res = call {<2 x float>, <2 x float>, <2 x float>} @llvm.vector.deinterleave3.v6f32(<6 x float> %v) |
| ret {<2 x float>, <2 x float>, <2 x float>} %res |
| } |
| |
| |
| define {<2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>} @vector_deinterleave5_v10f16_v2f16(<10 x half> %v) { |
| ; CHECK-LABEL: vector_deinterleave5_v10f16_v2f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 6 |
| ; CHECK-NEXT: vslidedown.vi v11, v8, 4 |
| ; CHECK-NEXT: vslidedown.vi v12, v8, 2 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v14, v8, 8 |
| ; CHECK-NEXT: srli a1, a0, 3 |
| ; CHECK-NEXT: srli a0, a0, 2 |
| ; CHECK-NEXT: add a2, a1, a1 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v11, v10, a1 |
| ; CHECK-NEXT: vslideup.vx v8, v12, a1 |
| ; CHECK-NEXT: add a1, a0, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v11, a0 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vmv1r.v v9, v14 |
| ; CHECK-NEXT: vs2r.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma |
| ; CHECK-NEXT: vlseg5e16.v v8, (a0) |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %res = call {<2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>} @llvm.vector.deinterleave5.v10f16(<10 x half> %v) |
| ret {<2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>} %res |
| } |
| |
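; With a single element per result, every lane is pulled out with its own
; vslidedown and the lanes are repacked before the vlseg7e16 reload.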
| define {<1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>} @vector_deinterleave7_v7f16_v1f16(<7 x half> %v) { |
| ; CHECK-LABEL: vector_deinterleave7_v7f16_v1f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v9, v8, 3 |
| ; CHECK-NEXT: vslidedown.vi v10, v8, 2 |
| ; CHECK-NEXT: vslidedown.vi v11, v8, 1 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vslidedown.vi v14, v8, 5 |
| ; CHECK-NEXT: vslidedown.vi v15, v8, 6 |
| ; CHECK-NEXT: srli a1, a0, 3 |
| ; CHECK-NEXT: srli a0, a0, 2 |
| ; CHECK-NEXT: add a2, a1, a1 |
| ; CHECK-NEXT: add a3, a0, a0 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v10, v9, a1 |
| ; CHECK-NEXT: vslideup.vx v12, v11, a1 |
| ; CHECK-NEXT: vsetvli zero, a3, e16, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v12, v10, a0 |
| ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v13, v8, 4 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v13, v14, a1 |
| ; CHECK-NEXT: vsetvli zero, a3, e16, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v13, v15, a0 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vs2r.v v12, (a0) |
| ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma |
| ; CHECK-NEXT: vlseg7e16.v v8, (a0) |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add sp, sp, a0 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %res = call {<1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>} @llvm.vector.deinterleave7.v7f16(<7 x half> %v) |
| ret {<1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>, <1 x half>} %res |
| } |