; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
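; These tests check that the fault-only-first VP load intrinsic
; (llvm.vp.load.ff.*) lowers to the matching vle<EEW>ff.v instruction, with
; the i32 member of the returned struct read back from the vl CSR via csrr.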

define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x i8>, i32 } %load
}

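; The _allones_mask variants below check that a splat-true mask folds to the
; unmasked vle<EEW>ff.v form (no v0.t operand).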
define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x i8>, i32 } %load
}

define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x i8>, i32 } %load
}

define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x i8>, i32 } %load
}

define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x i8>, i32 } %load
}

define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x i8>, i32 } %load
}

define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x i8>, i32 } %load
}

define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x i8>, i32 } %load
}

define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x i8>, i32 } %load
}

define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x i8>, i32 } %load
}

define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
  ret { <vscale x 32 x i8>, i32 } %load
}

define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 32 x i8>, i32 } %load
}

define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8(ptr %ptr, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> %m, i32 %evl)
  ret { <vscale x 64 x i8>, i32 } %load
}

define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv64i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 64 x i8>, i32 } %load
}

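; nxv128i8 is wider than the largest register group (m8 holds nxv64i8), so
; the checks below show the EVL clamped to VLMAX (vlenb * 8 for e8/m8)
; before a single m8 fault-only-first load is issued.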
define <vscale x 128 x i8> @vploadff_nxv128i8(ptr %ptr, ptr %evl_out, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    slli a3, a3, 3
; CHECK-NEXT:    bltu a2, a3, .LBB14_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a3
; CHECK-NEXT:  .LBB14_2:
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    sw a0, 0(a1)
; CHECK-NEXT:    ret
  %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> %m, i32 %evl)
  %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0
  %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1
  store i32 %result1, ptr %evl_out
  ret <vscale x 128 x i8> %result0
}

define <vscale x 128 x i8> @vploadff_nxv128i8_allones_mask(ptr %ptr, ptr %evl_out, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv128i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    slli a3, a3, 3
; CHECK-NEXT:    bltu a2, a3, .LBB15_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a3
; CHECK-NEXT:  .LBB15_2:
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    sw a0, 0(a1)
; CHECK-NEXT:    ret
  %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> splat (i1 true), i32 %evl)
  %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0
  %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1
  store i32 %result1, ptr %evl_out
  ret <vscale x 128 x i8> %result0
}

define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x i16>, i32 } %load
}

define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x i16>, i32 } %load
}

define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x i16>, i32 } %load
}

define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x i16>, i32 } %load
}

define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x i16>, i32 } %load
}

define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x i16>, i32 } %load
}

define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x i16>, i32 } %load
}

define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x i16>, i32 } %load
}

define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x i16>, i32 } %load
}

define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x i16>, i32 } %load
}

define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
  ret { <vscale x 32 x i16>, i32 } %load
}

define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 32 x i16>, i32 } %load
}

define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x i32>, i32 } %load
}

define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x i32>, i32 } %load
}

define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x i32>, i32 } %load
}

define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x i32>, i32 } %load
}

define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x i32>, i32 } %load
}

define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x i32>, i32 } %load
}

define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x i32>, i32 } %load
}

define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x i32>, i32 } %load
}

define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x i32>, i32 } %load
}

define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x i32>, i32 } %load
}

define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x i64>, i32 } %load
}

define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x i64>, i32 } %load
}

define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x i64>, i32 } %load
}

define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x i64>, i32 } %load
}

define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x i64>, i32 } %load
}

define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x i64>, i32 } %load
}

define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x i64>, i32 } %load
}

define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x i64>, i32 } %load
}

define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x half>, i32 } %load
}

define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x half>, i32 } %load
}

define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x half>, i32 } %load
}

define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x half>, i32 } %load
}

define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x half>, i32 } %load
}

define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x half>, i32 } %load
}

define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x half>, i32 } %load
}

define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x half>, i32 } %load
}

define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x half>, i32 } %load
}

define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x half>, i32 } %load
}

define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
  ret { <vscale x 32 x half>, i32 } %load
}

define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 32 x half>, i32 } %load
}

define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x float>, i32 } %load
}

define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x float>, i32 } %load
}

define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x float>, i32 } %load
}

define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x float>, i32 } %load
}

define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x float>, i32 } %load
}

define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x float>, i32 } %load
}

define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x float>, i32 } %load
}

define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x float>, i32 } %load
}

define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x float>, i32 } %load
}

define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x float>, i32 } %load
}

define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x double>, i32 } %load
}

define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x double>, i32 } %load
}

define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x double>, i32 } %load
}

define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x double>, i32 } %load
}

define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x double>, i32 } %load
}

define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x double>, i32 } %load
}

define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x double>, i32 } %load
}

define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x double>, i32 } %load
}

define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret { <vscale x 1 x bfloat>, i32 } %load
}

define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv1bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 1 x bfloat>, i32 } %load
}

define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret { <vscale x 2 x bfloat>, i32 } %load
}

define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv2bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 2 x bfloat>, i32 } %load
}

define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret { <vscale x 4 x bfloat>, i32 } %load
}

define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv4bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 4 x bfloat>, i32 } %load
}

define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret { <vscale x 8 x bfloat>, i32 } %load
}

define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv8bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 8 x bfloat>, i32 } %load
}

define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret { <vscale x 16 x bfloat>, i32 } %load
}

define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv16bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 16 x bfloat>, i32 } %load
}

define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
  ret { <vscale x 32 x bfloat>, i32 } %load
}

define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv32bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret { <vscale x 32 x bfloat>, i32 } %load
}

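; A non-power-of-two element count is widened to the next power-of-two
; container: the checks show nxv3i8 loaded as nxv4i8 using e8/mf2.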
define { <vscale x 3 x i8>, i32 } @vploadff_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vploadff_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    ret
  %load = call { <vscale x 3 x i8>, i32 } @llvm.vp.load.ff.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
  ret { <vscale x 3 x i8>, i32 } %load
}