| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V |
| ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V |
| ; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32 |
| ; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32 |
| |
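; Test lowering of llvm.masked.load for scalable integer vectors on RVV.
;
; Under +v, a type as small as nxv1i8 maps directly to a fractional LMUL
; (e8/mf8), where VLMAX already equals vscale, so "vsetvli a1, zero" with an
; implicit VLMAX AVL suffices. Under zve32x, ELEN is 32 and LMUL must satisfy
; LMUL >= SEW/ELEN, so e8/mf8 is unavailable; the nearest legal LMUL (mf4)
; holds twice as many elements, and the backend instead computes an explicit
; AVL of vscale = vlenb >> 3 (i.e. VLEN/64) before the masked vle.
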
| define <vscale x 1 x i8> @masked_load_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind { |
| ; V-LABEL: masked_load_nxv1i8: |
| ; V: # %bb.0: |
| ; V-NEXT: vsetvli a1, zero, e8, mf8, ta, ma |
| ; V-NEXT: vle8.v v8, (a0), v0.t |
| ; V-NEXT: ret |
| ; |
| ; ZVE32-LABEL: masked_load_nxv1i8: |
| ; ZVE32: # %bb.0: |
| ; ZVE32-NEXT: csrr a1, vlenb |
| ; ZVE32-NEXT: srli a1, a1, 3 |
| ; ZVE32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; ZVE32-NEXT: vle8.v v8, (a0), v0.t |
| ; ZVE32-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef) |
| ret <vscale x 1 x i8> %load |
| } |
| |
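; With a non-undef passthru, inactive lanes must hold the passthru value, so
; the passthru is materialized first (vmv.v.i) and the load uses the
; mask-undisturbed (mu) policy instead of mask-agnostic (ma).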
| define <vscale x 1 x i8> @masked_load_passthru_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind { |
| ; V-LABEL: masked_load_passthru_nxv1i8: |
| ; V: # %bb.0: |
| ; V-NEXT: vsetvli a1, zero, e8, mf8, ta, mu |
| ; V-NEXT: vmv.v.i v8, 0 |
| ; V-NEXT: vle8.v v8, (a0), v0.t |
| ; V-NEXT: ret |
| ; |
| ; ZVE32-LABEL: masked_load_passthru_nxv1i8: |
| ; ZVE32: # %bb.0: |
| ; ZVE32-NEXT: csrr a1, vlenb |
| ; ZVE32-NEXT: srli a1, a1, 3 |
| ; ZVE32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu |
| ; ZVE32-NEXT: vmv.v.i v8, 0 |
| ; ZVE32-NEXT: vle8.v v8, (a0), v0.t |
| ; ZVE32-NEXT: ret |
| %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> zeroinitializer) |
| ret <vscale x 1 x i8> %load |
| } |
| |
| define <vscale x 1 x i16> @masked_load_nxv1i16(ptr %a, <vscale x 1 x i1> %mask) nounwind { |
| ; V-LABEL: masked_load_nxv1i16: |
| ; V: # %bb.0: |
| ; V-NEXT: vsetvli a1, zero, e16, mf4, ta, ma |
| ; V-NEXT: vle16.v v8, (a0), v0.t |
| ; V-NEXT: ret |
| ; |
| ; ZVE32-LABEL: masked_load_nxv1i16: |
| ; ZVE32: # %bb.0: |
| ; ZVE32-NEXT: csrr a1, vlenb |
| ; ZVE32-NEXT: srli a1, a1, 3 |
| ; ZVE32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma |
| ; ZVE32-NEXT: vle16.v v8, (a0), v0.t |
| ; ZVE32-NEXT: ret |
| %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef) |
| ret <vscale x 1 x i16> %load |
| } |
| declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i16>) |
| |
| define <vscale x 1 x i32> @masked_load_nxv1i32(ptr %a, <vscale x 1 x i1> %mask) nounwind { |
| ; V-LABEL: masked_load_nxv1i32: |
| ; V: # %bb.0: |
| ; V-NEXT: vsetvli a1, zero, e32, mf2, ta, ma |
| ; V-NEXT: vle32.v v8, (a0), v0.t |
| ; V-NEXT: ret |
| ; |
| ; ZVE32-LABEL: masked_load_nxv1i32: |
| ; ZVE32: # %bb.0: |
| ; ZVE32-NEXT: csrr a1, vlenb |
| ; ZVE32-NEXT: srli a1, a1, 3 |
| ; ZVE32-NEXT: vsetvli zero, a1, e32, m1, ta, ma |
| ; ZVE32-NEXT: vle32.v v8, (a0), v0.t |
| ; ZVE32-NEXT: ret |
| %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef) |
| ret <vscale x 1 x i32> %load |
| } |
| declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i32>) |
| |
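; From nxv2i8 onward the required LMULs are legal under zve32x as well, so
; both configurations produce identical code and share the common CHECK
; prefix.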
| define <vscale x 2 x i8> @masked_load_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv2i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef) |
| ret <vscale x 2 x i8> %load |
| } |
| declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>) |
| |
| define <vscale x 2 x i16> @masked_load_nxv2i16(ptr %a, <vscale x 2 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma |
| ; CHECK-NEXT: vle16.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef) |
| ret <vscale x 2 x i16> %load |
| } |
| declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>) |
| |
| define <vscale x 2 x i32> @masked_load_nxv2i32(ptr %a, <vscale x 2 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma |
| ; CHECK-NEXT: vle32.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef) |
| ret <vscale x 2 x i32> %load |
| } |
| declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>) |
| |
| define <vscale x 4 x i8> @masked_load_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv4i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef) |
| ret <vscale x 4 x i8> %load |
| } |
| declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>) |
| |
| define <vscale x 4 x i16> @masked_load_nxv4i16(ptr %a, <vscale x 4 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv4i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma |
| ; CHECK-NEXT: vle16.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef) |
| ret <vscale x 4 x i16> %load |
| } |
| declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>) |
| |
| define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv4i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vle32.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef) |
| ret <vscale x 4 x i32> %load |
| } |
| declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>) |
| |
| define <vscale x 8 x i8> @masked_load_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv8i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef) |
| ret <vscale x 8 x i8> %load |
| } |
| declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>) |
| |
| define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv8i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma |
| ; CHECK-NEXT: vle16.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef) |
| ret <vscale x 8 x i16> %load |
| } |
| declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>) |
| |
| define <vscale x 8 x i32> @masked_load_nxv8i32(ptr %a, <vscale x 8 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv8i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma |
| ; CHECK-NEXT: vle32.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef) |
| ret <vscale x 8 x i32> %load |
| } |
| declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>) |
| |
| define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv16i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef) |
| ret <vscale x 16 x i8> %load |
| } |
| declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>) |
| |
| define <vscale x 16 x i16> @masked_load_nxv16i16(ptr %a, <vscale x 16 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv16i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma |
| ; CHECK-NEXT: vle16.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef) |
| ret <vscale x 16 x i16> %load |
| } |
| declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i16>) |
| |
| define <vscale x 16 x i32> @masked_load_nxv16i32(ptr %a, <vscale x 16 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv16i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vle32.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef) |
| ret <vscale x 16 x i32> %load |
| } |
| declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i32>) |
| |
| define <vscale x 32 x i8> @masked_load_nxv32i8(ptr %a, <vscale x 32 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv32i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef) |
| ret <vscale x 32 x i8> %load |
| } |
| declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>) |
| |
| define <vscale x 32 x i16> @masked_load_nxv32i16(ptr %a, <vscale x 32 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv32i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma |
| ; CHECK-NEXT: vle16.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef) |
| ret <vscale x 32 x i16> %load |
| } |
| declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>) |
| |
| define <vscale x 64 x i8> @masked_load_nxv64i8(ptr %a, <vscale x 64 x i1> %mask) nounwind { |
| ; CHECK-LABEL: masked_load_nxv64i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef) |
| ret <vscale x 64 x i8> %load |
| } |
| declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr, i32, <vscale x 64 x i1>, <vscale x 64 x i8>) |
| |
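; A mask known to be all zeros folds the masked load away entirely: the
; result is just the passthru operand (undef here), so no code is emitted.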
| define <vscale x 2 x i8> @masked_load_zero_mask(ptr %a) nounwind { |
| ; CHECK-LABEL: masked_load_zero_mask: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef) |
| ret <vscale x 2 x i8> %load |
| } |
| |
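; A mask known to be all ones lowers to an ordinary unmasked load, and the
; passthru operand is dead.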
| define <vscale x 2 x i8> @masked_load_allones_mask(ptr %a, <vscale x 2 x i8> %maskedoff) nounwind { |
| ; CHECK-LABEL: masked_load_allones_mask: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma |
| ; CHECK-NEXT: vle8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1), <vscale x 2 x i8> %maskedoff) |
| ret <vscale x 2 x i8> %load |
| } |