; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
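; Each function below creates enough vector register pressure (four live
; <vscale x 8 x i64> loads) that the initial splat-like result cannot stay in
; a register. The CHECK lines verify whether the producing instruction is
; rematerialized at its later use or spilled to the stack and reloaded.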
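; vid.v has no register operands, so it is recomputed at the final store
; (note the second vid.v) rather than spilled across the loads.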
define void @vid(ptr %p) {
; CHECK-LABEL: vid:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> poison, i64 -1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}

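; Here vid.v has a non-poison passthru (%v) and a tail-undisturbed policy, so
; its result cannot simply be recomputed; expect a stack spill and reload
; instead of a second vid.v.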
define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
; CHECK-LABEL: vid_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> %v, i64 1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}

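; vmv.v.i uses only an immediate, so it is rematerialized (a second vmv.v.i)
; rather than spilled.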
define void @vmv.v.i(ptr %p) {
; CHECK-LABEL: vmv.v.i:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.i v8, 1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vmv.v.i v8, 1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
  %vmv.v.i = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 1, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
  ret void
}

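; Rematerializing vmv.v.x at the final store would require extending the live
; range of the scalar %x, which is otherwise dead after the first use, so the
; vector is spilled and reloaded instead.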
define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.v.x_needs_extended:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
  %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
  ret void
}

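; %x is kept live to the end of the function by the trailing scalar store, so
; rematerializing vmv.v.x extends no live range; expect a second vmv.v.x
; instead of a spill.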
define void @vmv.v.x_live(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.v.x_live:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: sd a1, 0(a0)
; CHECK-NEXT: ret
  %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
  store volatile i64 %x, ptr %p
  ret void
}

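; Floating-point analogue of vmv.v.x_live: fa0 stays live for the final fsd,
; so vfmv.v.f is rematerialized.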
define void @vfmv.v.f(ptr %p, double %x) {
; CHECK-LABEL: vfmv.v.f:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: fsd fa0, 0(a0)
; CHECK-NEXT: ret
  %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p

  %a = load volatile <vscale x 8 x double>, ptr %p
  %b = load volatile <vscale x 8 x double>, ptr %p
  %c = load volatile <vscale x 8 x double>, ptr %p
  %d = load volatile <vscale x 8 x double>, ptr %p
  store volatile <vscale x 8 x double> %d, ptr %p
  store volatile <vscale x 8 x double> %c, ptr %p
  store volatile <vscale x 8 x double> %b, ptr %p
  store volatile <vscale x 8 x double> %a, ptr %p

  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
  store volatile double %x, ptr %p
  ret void
}

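; vmv.s.x writes only element 0, but the full <vscale x 8 x i64> result is
; stored; with %x kept live by the scalar store it is rematerialized rather
; than spilled.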
define void @vmv.s.x(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.s.x:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: sd a1, 0(a0)
; CHECK-NEXT: ret
  %vmv.s.x = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p
  store volatile i64 %x, ptr %p
  ret void
}

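; Floating-point analogue of vmv.s.x: fa0 stays live for the final fsd, so
; vfmv.s.f is rematerialized.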
define void @vfmv.s.f(ptr %p, double %x) {
; CHECK-LABEL: vfmv.s.f:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vs8r.v v0, (a0)
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: fsd fa0, 0(a0)
; CHECK-NEXT: ret
  %vfmv.s.f = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
  store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p

  %a = load volatile <vscale x 8 x double>, ptr %p
  %b = load volatile <vscale x 8 x double>, ptr %p
  %c = load volatile <vscale x 8 x double>, ptr %p
  %d = load volatile <vscale x 8 x double>, ptr %p
  store volatile <vscale x 8 x double> %d, ptr %p
  store volatile <vscale x 8 x double> %c, ptr %p
  store volatile <vscale x 8 x double> %b, ptr %p
  store volatile <vscale x 8 x double> %a, ptr %p

  store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p
  store volatile double %x, ptr %p
  ret void
}