; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

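; Truncating to the illegal element type i7 is expected to lower the same way
; as truncating to i8: a single masked vnsrl.wi at SEW=8.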
declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32)

define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i7_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i7> %v
}

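; The non-byte-sized source type i15 is presumably promoted to i16 during type
; legalization, so this should still be a single masked vnsrl.wi at SEW=8.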
declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i15:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

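; Plain i16 -> i8 truncation, masked and unmasked: one vnsrl.wi with a shift
; amount of zero.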
declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

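; 128 x i16 -> 128 x i7 does not fit in one vector register group, so the
; operation is split in half: the upper mask bits are extracted with a
; vslidedown, each 64-element half is narrowed under its own clamped VL, and
; the halves are recombined with vslideup.vx.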
declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)

define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vslidedown.vi v12, v0, 8
; CHECK-NEXT: mv a2, a0
; CHECK-NEXT: bltu a0, a1, .LBB4_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: addi a2, a0, -64
; CHECK-NEXT: sltu a0, a0, a2
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a2
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslideup.vx v8, v24, a1
; CHECK-NEXT: ret
  %v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
  ret <128 x i7> %v
}

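; i32 -> i8 requires two narrowing steps (e16, then e8), reusing the same mask
; for both vnsrl.wi instructions.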
declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

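; i64 -> i8 requires three narrowing steps: e32, e16, then e8.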
declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

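; i32 -> i16 is a single narrowing step.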
declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32)

define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i16> %v
}

define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i16> %v
}

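; i64 -> i16 takes two narrowing steps (e32, then e16).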
declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i16> %v
}

define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i16> %v
}

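; Non-power-of-two element count: the two narrowing steps match the
; power-of-two case, just at a larger LMUL.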
declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32)

define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v15i16_v15i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: ret
  %v = call <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
  ret <15 x i16> %v
}

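; i64 -> i32 is a single narrowing step.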
declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i32_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
  %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i32> %v
}

define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i32_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
  %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i32> %v
}

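; 128 x i64 -> 128 x i32 is too large to pass and return entirely in vector
; registers: part of the source is loaded through the pointer in a1, the mask
; is split up with vslidedowns, each 16-element chunk is narrowed under a
; clamped VL with vector register groups spilled to the stack, and the result
; is stored through the pointer in a0.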
declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32)

define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) {
; RV32-LABEL: vtrunc_v128i32_v128i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset s0, -4
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 72
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: sub sp, sp, a2
; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 72 * vlenb
; RV32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 5
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
; RV32-NEXT: vslidedown.vi v5, v0, 8
; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v4, v0, 4
; RV32-NEXT: addi a2, a7, -64
; RV32-NEXT: vslidedown.vi v3, v5, 4
; RV32-NEXT: sltu a3, a7, a2
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a4, a3, a2
; RV32-NEXT: addi a2, a4, -32
; RV32-NEXT: sltu a3, a4, a2
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a2
; RV32-NEXT: li a2, 16
; RV32-NEXT: addi t0, a3, -16
; RV32-NEXT: mv a5, a3
; RV32-NEXT: bltu a3, a2, .LBB16_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a5, 16
; RV32-NEXT: .LBB16_2:
; RV32-NEXT: li t2, 64
; RV32-NEXT: sltu t1, a3, t0
; RV32-NEXT: mv a6, a7
; RV32-NEXT: bltu a7, t2, .LBB16_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a6, 64
; RV32-NEXT: .LBB16_4:
; RV32-NEXT: addi t2, a1, 128
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v6, v4, 2
; RV32-NEXT: addi t6, a1, 512
; RV32-NEXT: addi t5, a1, 640
; RV32-NEXT: vslidedown.vi v0, v3, 2
; RV32-NEXT: addi t1, t1, -1
; RV32-NEXT: addi t3, a1, 384
; RV32-NEXT: vslidedown.vi v2, v5, 2
; RV32-NEXT: li a3, 32
; RV32-NEXT: addi t4, a6, -32
; RV32-NEXT: sltu a6, a6, t4
; RV32-NEXT: addi a6, a6, -1
; RV32-NEXT: and a6, a6, t4
; RV32-NEXT: addi t4, a6, -16
; RV32-NEXT: sltu s0, a6, t4
; RV32-NEXT: addi s0, s0, -1
; RV32-NEXT: bltu a6, a2, .LBB16_6
; RV32-NEXT: # %bb.5:
; RV32-NEXT: li a6, 16
; RV32-NEXT: .LBB16_6:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle64.v v8, (t6)
; RV32-NEXT: csrr t6, vlenb
; RV32-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: li a0, 56
; RV32-NEXT: mul t6, t6, a0
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: add t6, sp, t6
; RV32-NEXT: addi t6, t6, 16
; RV32-NEXT: vs8r.v v8, (t6) # vscale x 64-byte Folded Spill
; RV32-NEXT: vle64.v v8, (t5)
; RV32-NEXT: vle64.v v16, (t2)
; RV32-NEXT: vle64.v v24, (a1)
; RV32-NEXT: csrr t2, vlenb
; RV32-NEXT: li t5, 48
; RV32-NEXT: mul t2, t2, t5
; RV32-NEXT: add t2, sp, t2
; RV32-NEXT: addi t2, t2, 16
; RV32-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV32-NEXT: vle64.v v24, (t3)
; RV32-NEXT: csrr t2, vlenb
; RV32-NEXT: slli t2, t2, 3
; RV32-NEXT: add t2, sp, t2
; RV32-NEXT: addi t2, t2, 16
; RV32-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV32-NEXT: and t2, t1, t0
; RV32-NEXT: and t1, s0, t4
; RV32-NEXT: addi a1, a1, 256
; RV32-NEXT: mv t0, a4
; RV32-NEXT: bltu a4, a3, .LBB16_8
; RV32-NEXT: # %bb.7:
; RV32-NEXT: li t0, 32
; RV32-NEXT: .LBB16_8:
; RV32-NEXT: vsetvli zero, t2, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v8, 0, v0.t
; RV32-NEXT: csrr t2, vlenb
; RV32-NEXT: li t3, 24
; RV32-NEXT: mul t2, t2, t3
; RV32-NEXT: add t2, sp, t2
; RV32-NEXT: addi t2, t2, 16
; RV32-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr t2, vlenb
; RV32-NEXT: li t3, 56
; RV32-NEXT: mul t2, t2, t3
; RV32-NEXT: add t2, sp, t2
; RV32-NEXT: addi t2, t2, 16
; RV32-NEXT: vl8r.v v24, (t2) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v8, v24, 0, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; RV32-NEXT: vmv1r.v v0, v6
; RV32-NEXT: vsetvli zero, t1, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; RV32-NEXT: addi a5, t0, -16
; RV32-NEXT: sltu t0, t0, a5
; RV32-NEXT: addi t0, t0, -1
; RV32-NEXT: and a5, t0, a5
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v30, v7, 2
; RV32-NEXT: vmv1r.v v0, v4
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li t0, 48
; RV32-NEXT: mul a1, a1, t0
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a6, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a6, 56
; RV32-NEXT: mul a1, a1, a6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: vmv1r.v v0, v2
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0, v0.t
; RV32-NEXT: bltu a4, a2, .LBB16_10
; RV32-NEXT: # %bb.9:
; RV32-NEXT: li a4, 16
; RV32-NEXT: .LBB16_10:
; RV32-NEXT: vmv1r.v v0, v5
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v8, 0, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a1, a1, a4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: mv a1, a7
; RV32-NEXT: bltu a7, a3, .LBB16_12
; RV32-NEXT: # %bb.11:
; RV32-NEXT: li a1, 32
; RV32-NEXT: .LBB16_12:
; RV32-NEXT: vmv1r.v v0, v30
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; RV32-NEXT: vmv4r.v v8, v24
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 6
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vslideup.vi v16, v8, 16
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 6
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
; RV32-NEXT: addi a4, a1, -16
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: li a6, 56
; RV32-NEXT: mul a5, a5, a6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vl8r.v v16, (a5) # vscale x 64-byte Folded Reload
; RV32-NEXT: vslideup.vi v16, v24, 16
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: li a6, 56
; RV32-NEXT: mul a5, a5, a6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # vscale x 64-byte Folded Spill
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: li a6, 48
; RV32-NEXT: mul a5, a5, a6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vl8r.v v16, (a5) # vscale x 64-byte Folded Reload
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: li a6, 24
; RV32-NEXT: mul a5, a5, a6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
; RV32-NEXT: vslideup.vi v16, v8, 16
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: li a6, 48
; RV32-NEXT: mul a5, a5, a6
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # vscale x 64-byte Folded Spill
; RV32-NEXT: sltu a1, a1, a4
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV32-NEXT: bltu a7, a2, .LBB16_14
; RV32-NEXT: # %bb.13:
; RV32-NEXT: li a7, 16
; RV32-NEXT: .LBB16_14:
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v16, 0, v0.t
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vslideup.vi v24, v8, 16
; RV32-NEXT: vse32.v v24, (a0)
; RV32-NEXT: addi a1, a0, 256
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: addi a1, a0, 128
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 56
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: addi a0, a0, 384
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 6
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 72
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vtrunc_v128i32_v128i64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -48
; RV64-NEXT: .cfi_def_cfa_offset 48
; RV64-NEXT: sd s0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset s0, -8
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: li a3, 72
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: sub sp, sp, a2
; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 72 * vlenb
; RV64-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v7, v0
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a2, a2, 5
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 32
; RV64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: li a3, 40
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 32
; RV64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
; RV64-NEXT: vslidedown.vi v5, v0, 8
; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vi v4, v0, 4
; RV64-NEXT: addi a2, a7, -64
; RV64-NEXT: vslidedown.vi v3, v5, 4
; RV64-NEXT: sltu a3, a7, a2
; RV64-NEXT: addi a3, a3, -1
; RV64-NEXT: and a4, a3, a2
; RV64-NEXT: addi a2, a4, -32
; RV64-NEXT: sltu a3, a4, a2
; RV64-NEXT: addi a3, a3, -1
; RV64-NEXT: and a3, a3, a2
; RV64-NEXT: li a2, 16
; RV64-NEXT: addi t0, a3, -16
; RV64-NEXT: mv a5, a3
; RV64-NEXT: bltu a3, a2, .LBB16_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a5, 16
; RV64-NEXT: .LBB16_2:
; RV64-NEXT: li t2, 64
; RV64-NEXT: sltu t1, a3, t0
; RV64-NEXT: mv a6, a7
; RV64-NEXT: bltu a7, t2, .LBB16_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a6, 64
; RV64-NEXT: .LBB16_4:
; RV64-NEXT: addi t2, a1, 128
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v6, v4, 2
; RV64-NEXT: addi t6, a1, 512
; RV64-NEXT: addi t5, a1, 640
; RV64-NEXT: vslidedown.vi v0, v3, 2
; RV64-NEXT: addi t1, t1, -1
; RV64-NEXT: addi t3, a1, 384
; RV64-NEXT: vslidedown.vi v2, v5, 2
; RV64-NEXT: li a3, 32
; RV64-NEXT: addi t4, a6, -32
; RV64-NEXT: sltu a6, a6, t4
; RV64-NEXT: addi a6, a6, -1
; RV64-NEXT: and a6, a6, t4
; RV64-NEXT: addi t4, a6, -16
; RV64-NEXT: sltu s0, a6, t4
; RV64-NEXT: addi s0, s0, -1
; RV64-NEXT: bltu a6, a2, .LBB16_6
; RV64-NEXT: # %bb.5:
; RV64-NEXT: li a6, 16
; RV64-NEXT: .LBB16_6:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (t6)
; RV64-NEXT: csrr t6, vlenb
; RV64-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a0, 56
; RV64-NEXT: mul t6, t6, a0
; RV64-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: add t6, sp, t6
; RV64-NEXT: addi t6, t6, 32
; RV64-NEXT: vs8r.v v8, (t6) # vscale x 64-byte Folded Spill
; RV64-NEXT: vle64.v v8, (t5)
; RV64-NEXT: vle64.v v16, (t2)
; RV64-NEXT: vle64.v v24, (a1)
; RV64-NEXT: csrr t2, vlenb
; RV64-NEXT: li t5, 48
; RV64-NEXT: mul t2, t2, t5
; RV64-NEXT: add t2, sp, t2
; RV64-NEXT: addi t2, t2, 32
; RV64-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV64-NEXT: vle64.v v24, (t3)
; RV64-NEXT: csrr t2, vlenb
; RV64-NEXT: slli t2, t2, 3
; RV64-NEXT: add t2, sp, t2
; RV64-NEXT: addi t2, t2, 32
; RV64-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV64-NEXT: and t2, t1, t0
; RV64-NEXT: and t1, s0, t4
; RV64-NEXT: addi a1, a1, 256
; RV64-NEXT: mv t0, a4
; RV64-NEXT: bltu a4, a3, .LBB16_8
; RV64-NEXT: # %bb.7:
; RV64-NEXT: li t0, 32
; RV64-NEXT: .LBB16_8:
; RV64-NEXT: vsetvli zero, t2, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v24, v8, 0, v0.t
; RV64-NEXT: csrr t2, vlenb
; RV64-NEXT: li t3, 24
; RV64-NEXT: mul t2, t2, t3
; RV64-NEXT: add t2, sp, t2
; RV64-NEXT: addi t2, t2, 32
; RV64-NEXT: vs8r.v v24, (t2) # vscale x 64-byte Folded Spill
; RV64-NEXT: vmv1r.v v0, v3
; RV64-NEXT: csrr t2, vlenb
; RV64-NEXT: li t3, 56
; RV64-NEXT: mul t2, t2, t3
; RV64-NEXT: add t2, sp, t2
; RV64-NEXT: addi t2, t2, 32
; RV64-NEXT: vl8r.v v24, (t2) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v8, v24, 0, v0.t
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: slli a5, a5, 6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: vsetvli zero, t1, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: slli a5, a5, 4
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; RV64-NEXT: addi a5, t0, -16
; RV64-NEXT: sltu t0, t0, a5
; RV64-NEXT: addi t0, t0, -1
; RV64-NEXT: and a5, t0, a5
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: addi a1, sp, 32
; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v30, v7, 2
; RV64-NEXT: vmv1r.v v0, v4
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li t0, 48
; RV64-NEXT: mul a1, a1, t0
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a6, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a6, 56
; RV64-NEXT: mul a1, a1, a6
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV64-NEXT: vmv1r.v v0, v2
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v16, v8, 0, v0.t
; RV64-NEXT: bltu a4, a2, .LBB16_10
; RV64-NEXT: # %bb.9:
; RV64-NEXT: li a4, 16
; RV64-NEXT: .LBB16_10:
; RV64-NEXT: vmv1r.v v0, v5
; RV64-NEXT: addi a1, sp, 32
; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a4, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v24, v8, 0, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a4, 48
; RV64-NEXT: mul a1, a1, a4
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
; RV64-NEXT: mv a1, a7
; RV64-NEXT: bltu a7, a3, .LBB16_12
; RV64-NEXT: # %bb.11:
; RV64-NEXT: li a1, 32
; RV64-NEXT: .LBB16_12:
; RV64-NEXT: vmv1r.v v0, v30
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: li a5, 24
; RV64-NEXT: mul a4, a4, a5
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; RV64-NEXT: vmv4r.v v8, v24
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: slli a4, a4, 4
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: li a5, 24
; RV64-NEXT: mul a4, a4, a5
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: slli a4, a4, 6
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vslideup.vi v16, v8, 16
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: slli a4, a4, 6
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
; RV64-NEXT: addi a4, a1, -16
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: li a6, 56
; RV64-NEXT: mul a5, a5, a6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vl8r.v v16, (a5) # vscale x 64-byte Folded Reload
; RV64-NEXT: vslideup.vi v16, v24, 16
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: li a6, 56
; RV64-NEXT: mul a5, a5, a6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vs8r.v v16, (a5) # vscale x 64-byte Folded Spill
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: li a6, 48
; RV64-NEXT: mul a5, a5, a6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vl8r.v v16, (a5) # vscale x 64-byte Folded Reload
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: li a6, 24
; RV64-NEXT: mul a5, a5, a6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
; RV64-NEXT: vslideup.vi v16, v8, 16
; RV64-NEXT: csrr a5, vlenb
; RV64-NEXT: li a6, 48
; RV64-NEXT: mul a5, a5, a6
; RV64-NEXT: add a5, sp, a5
; RV64-NEXT: addi a5, a5, 32
; RV64-NEXT: vs8r.v v16, (a5) # vscale x 64-byte Folded Spill
; RV64-NEXT: sltu a1, a1, a4
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a1, a1, a4
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: slli a4, a4, 5
; RV64-NEXT: add a4, sp, a4
; RV64-NEXT: addi a4, a4, 32
; RV64-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v8, v16, 0, v0.t
; RV64-NEXT: bltu a7, a2, .LBB16_14
; RV64-NEXT: # %bb.13:
; RV64-NEXT: li a7, 16
; RV64-NEXT: .LBB16_14:
; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 40
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; RV64-NEXT: vnsrl.wi v24, v16, 0, v0.t
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vslideup.vi v24, v8, 16
; RV64-NEXT: vse32.v v24, (a0)
; RV64-NEXT: addi a1, a0, 256
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: li a3, 48
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 32
; RV64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: addi a1, a0, 128
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: li a3, 56
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 32
; RV64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: addi a0, a0, 384
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 6
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 72
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 48
; RV64-NEXT: ld s0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 48
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
  %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl)
  ret <128 x i32> %v
}

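; 32 x i64 -> 32 x i32 is split into two 16-element halves, each narrowed under
; its clamped VL, then recombined with a vslideup.vi by 16.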
declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)

define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v12, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB17_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v24, 16
; CHECK-NEXT: ret
  %v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl)
  ret <32 x i32> %v
}

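; i8 -> i7: both element types are handled at SEW=8, so the truncate is a no-op
; and the input is returned unchanged.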
declare <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8>, <2 x i1>, i32)

define <2 x i7> @vtrunc_v2i7_v2i8(<2 x i8> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i7_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ret
  %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i7> %v
}