|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 | 
|  | ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v,+m \ | 
|  | ; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ | 
|  | ; RUN:     --check-prefixes=CHECK,ZVFH | 
|  | ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v,+m \ | 
|  | ; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ | 
|  | ; RUN:     --check-prefixes=CHECK,ZVFH | 
|  | ; RUN: llc -mtriple=riscv32 \ | 
|  | ; RUN:     -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m \ | 
|  | ; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ | 
|  | ; RUN:     --check-prefixes=CHECK,ZVFHMIN | 
|  | ; RUN: llc -mtriple=riscv64 \ | 
|  | ; RUN:     -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m \ | 
|  | ; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ | 
|  | ; RUN:     --check-prefixes=CHECK,ZVFHMIN | 
|  |  | 
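; @llvm.vp.minimum.* is the VP form of the NaN-propagating minimum: if either
; lane operand is NaN, the result lane is NaN. Since vfmin.vv implements IEEE
; minimumNumber (a NaN operand loses to a numeric one), the expected lowering
; first tests each source for NaN with a "vmfeq.vv v, v" self-compare and uses
; vmerge to substitute the NaN value into the other operand's lanes before the
; vfmin. bf16 sources (and f16 sources under Zvfhmin) are widened to f32 with
; vfwcvt first and narrowed back with vfncvt at the end.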
|  | declare <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32) | 
|  |  | 
|  | define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v11, v9, v0.t | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v11, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v11, v11, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v9, v11, v9, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v9, v9, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 %evl) | 
|  | ret <vscale x 1 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v9, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v9, v8, v9 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 1 x bfloat> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32) | 
|  |  | 
|  | define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v11, v9, v0.t | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v11, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v11, v11, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v9, v11, v9, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v9, v9, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 %evl) | 
|  | ret <vscale x 2 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v9, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v9, v8, v9 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 2 x bfloat> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32) | 
|  |  | 
|  | define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v9, v0.t | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v14, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v14, v14, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v14, v12, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v12, v12, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v14, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v12, v8, v16, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 %evl) | 
|  | ret <vscale x 4 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v9 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v10, v12, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; CHECK-NEXT:    vmerge.vvm v10, v12, v10, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v10, v10, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 4 x bfloat> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32) | 
|  |  | 
|  | define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v12, v0 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v10, v0.t | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v20, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v20, v20, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v20, v16, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v16, v20, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v8, v24, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 %evl) | 
|  | ret <vscale x 8 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v10 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v16, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v16, v16 | 
|  | ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v12, v12, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 8 x bfloat> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 16 x bfloat> @llvm.vp.minimum.nxv16bf16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32) | 
|  |  | 
|  | define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv16bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 3 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v7, v0 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v16, v24, v0 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v24, v24, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v24, v16, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v8, v16, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 16 x bfloat> @llvm.vp.minimum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl) | 
|  | ret <vscale x 16 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv16bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12 | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v24, v24 | 
|  | ; CHECK-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v24, v16, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v16, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 16 x bfloat> @llvm.vp.minimum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 16 x bfloat> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 32 x bfloat> @llvm.vp.minimum.nxv32bf16(<vscale x 32 x bfloat>, <vscale x 32 x bfloat>, <vscale x 32 x i1>, i32) | 
|  |  | 
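; Widening nxv32bf16 to f32 would need twice the largest register group
; (LMUL=8), so the expected code splits the operation at 2*vlenb elements:
; the upper half of the mask is extracted with vslidedown.vx and the high
; part (evl - 2*vlenb, clamped to zero) is handled first, then evl is capped
; at 2*vlenb for the low part, spilling widened temporaries to the stack.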
|  | define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv32bf16: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    li a2, 24 | 
|  | ; CHECK-NEXT:    mul a1, a1, a2 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v7, v0 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 4 | 
|  | ; CHECK-NEXT:    add a1, sp, a1 | 
|  | ; CHECK-NEXT:    addi a1, a1, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a2, 1 | 
|  | ; CHECK-NEXT:    srli a2, a2, 2 | 
|  | ; CHECK-NEXT:    sub a3, a0, a1 | 
|  | ; CHECK-NEXT:    vslidedown.vx v6, v0, a2 | 
|  | ; CHECK-NEXT:    sltu a2, a0, a3 | 
|  | ; CHECK-NEXT:    addi a2, a2, -1 | 
|  | ; CHECK-NEXT:    and a2, a2, a3 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    addi a3, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a3) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v5, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv8r.v v8, v16 | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    slli a2, a2, 4 | 
|  | ; CHECK-NEXT:    add a2, sp, a2 | 
|  | ; CHECK-NEXT:    addi a2, a2, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v28, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v5 | 
|  | ; CHECK-NEXT:    vmv8r.v v24, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0 | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    slli a2, a2, 3 | 
|  | ; CHECK-NEXT:    add a2, sp, a2 | 
|  | ; CHECK-NEXT:    addi a2, a2, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    vmfeq.vv v12, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    slli a2, a2, 3 | 
|  | ; CHECK-NEXT:    add a2, sp, a2 | 
|  | ; CHECK-NEXT:    addi a2, a2, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v16, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16, v0.t | 
|  | ; CHECK-NEXT:    bltu a0, a1, .LBB10_2 | 
|  | ; CHECK-NEXT:  # %bb.1: | 
|  | ; CHECK-NEXT:    mv a0, a1 | 
|  | ; CHECK-NEXT:  .LBB10_2: | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    addi a1, sp, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v24, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v24, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v24, v16, v0 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v8 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v16, v24, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    li a1, 24 | 
|  | ; CHECK-NEXT:    mul a0, a0, a1 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 32 x bfloat> @llvm.vp.minimum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl) | 
|  | ret <vscale x 32 x bfloat> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv32bf16_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    li a2, 24 | 
|  | ; CHECK-NEXT:    mul a1, a1, a2 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 4 | 
|  | ; CHECK-NEXT:    add a1, sp, a1 | 
|  | ; CHECK-NEXT:    addi a1, a1, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmset.m v16 | 
|  | ; CHECK-NEXT:    slli a1, a2, 1 | 
|  | ; CHECK-NEXT:    srli a2, a2, 2 | 
|  | ; CHECK-NEXT:    sub a3, a0, a1 | 
|  | ; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vslidedown.vx v7, v16, a2 | 
|  | ; CHECK-NEXT:    sltu a2, a0, a3 | 
|  | ; CHECK-NEXT:    addi a2, a2, -1 | 
|  | ; CHECK-NEXT:    and a2, a2, a3 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    csrr a3, vlenb | 
|  | ; CHECK-NEXT:    slli a3, a3, 3 | 
|  | ; CHECK-NEXT:    add a3, sp, a3 | 
|  | ; CHECK-NEXT:    addi a3, a3, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a3) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v6, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv8r.v v8, v16 | 
|  | ; CHECK-NEXT:    csrr a2, vlenb | 
|  | ; CHECK-NEXT:    slli a2, a2, 4 | 
|  | ; CHECK-NEXT:    add a2, sp, a2 | 
|  | ; CHECK-NEXT:    addi a2, a2, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v28, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    vmv8r.v v24, v8 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0 | 
|  | ; CHECK-NEXT:    addi a2, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmfeq.vv v12, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v16, v8, v0.t | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16, v0.t | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    bltu a0, a1, .LBB11_2 | 
|  | ; CHECK-NEXT:  # %bb.1: | 
|  | ; CHECK-NEXT:    mv a0, a1 | 
|  | ; CHECK-NEXT:  .LBB11_2: | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 3 | 
|  | ; CHECK-NEXT:    add a1, sp, a1 | 
|  | ; CHECK-NEXT:    addi a1, a1, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v0 | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v8, v0 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v16, v16, v24 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    li a1, 24 | 
|  | ; CHECK-NEXT:    mul a0, a0, a1 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 32 x bfloat> @llvm.vp.minimum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 32 x bfloat> %v | 
|  | } | 

declare <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32)
|  |  | 
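; For f16 the two configurations diverge: with Zvfh the compare/merge/min is
; done directly at e16, while Zvfhmin has no f16 arithmetic and follows the
; same widen-to-f32 sequence as the bf16 tests above.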
|  | define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv1f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv1f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v9, v11, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v11, v11, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v9, v11, v9, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) | 
|  | ret <vscale x 1 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv1f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv1f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v9, v10, v8, v0 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v9, v8, v9 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 1 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) | 
|  |  | 
|  | define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv2f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv2f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v9, v11, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v11, v11, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v9, v11, v9, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) | 
|  | ret <vscale x 2 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv2f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv2f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v9, v10, v8, v0 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v9, v8, v9 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 2 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) | 
|  |  | 
|  | define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv4f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv4f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9, v0.t | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v14, v14, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v14, v12, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v12, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v12, v14, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v10 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v12, v8, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) | 
|  | ret <vscale x 4 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv4f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv4f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v10, v12, v0 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v10, v12, v10, v0 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v10, v10, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 4 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) | 
|  |  | 
|  | define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv8f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v12, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v13, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v13 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v14, v8, v10, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v13, v10, v10, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v13 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v14, v0.t | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv8f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v12, v0 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10, v0.t | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v20, v20, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v24, v20, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v16, v20, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v8, v24, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) | 
|  | ret <vscale x 8 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv8f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v12, v8, v10, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v12 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv8f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v12, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v16, v16 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v12, v16, v12, v0 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v12, v12, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 8 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) | 
|  |  | 
|  | define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv16f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v16, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v17, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v17 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v20, v8, v12, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v16 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v17, v12, v12, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v17 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v16 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v20, v0.t | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv16f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, -16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a1, a1, 3 | 
|  | ; ZVFHMIN-NEXT:    sub sp, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v7, v0 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v16, v24, v0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v24, v24, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v24, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v8, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 3 | 
|  | ; ZVFHMIN-NEXT:    add sp, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) | 
|  | ret <vscale x 16 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv16f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v16, v8, v12, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v16 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv16f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12 | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v24, v24 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v24, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 16 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) | 
|  |  | 
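; With Zvfh the nxv32f16 case stays at e16/m8 and needs only a single m8
; spill slot; under Zvfhmin it is split into two halves like nxv32bf16 above.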
|  | define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv32f16: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    addi sp, sp, -16 | 
|  | ; ZVFH-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; ZVFH-NEXT:    csrr a1, vlenb | 
|  | ; ZVFH-NEXT:    slli a1, a1, 3 | 
|  | ; ZVFH-NEXT:    sub sp, sp, a1 | 
|  | ; ZVFH-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
|  | ; ZVFH-NEXT:    vmv1r.v v7, v0 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v25, v8, v8, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v25 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; ZVFH-NEXT:    addi a0, sp, 16 | 
|  | ; ZVFH-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v25, v16, v16, v0.t | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v25 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v16, v8, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFH-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v16, v0.t | 
|  | ; ZVFH-NEXT:    csrr a0, vlenb | 
|  | ; ZVFH-NEXT:    slli a0, a0, 3 | 
|  | ; ZVFH-NEXT:    add sp, sp, a0 | 
|  | ; ZVFH-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; ZVFH-NEXT:    addi sp, sp, 16 | 
|  | ; ZVFH-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv32f16: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, -16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    li a2, 24 | 
|  | ; ZVFHMIN-NEXT:    mul a1, a1, a2 | 
|  | ; ZVFHMIN-NEXT:    sub sp, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v7, v0 | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a1, a1, 4 | 
|  | ; ZVFHMIN-NEXT:    add a1, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    addi a1, a1, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a1, a2, 1 | 
|  | ; ZVFHMIN-NEXT:    srli a2, a2, 2 | 
|  | ; ZVFHMIN-NEXT:    sub a3, a0, a1 | 
|  | ; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2 | 
|  | ; ZVFHMIN-NEXT:    sltu a2, a0, a3 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, -1 | 
|  | ; ZVFHMIN-NEXT:    and a2, a2, a3 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v6 | 
|  | ; ZVFHMIN-NEXT:    addi a3, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a3) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v5, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv8r.v v8, v16 | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a2, a2, 4 | 
|  | ; ZVFHMIN-NEXT:    add a2, sp, a2 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v5 | 
|  | ; ZVFHMIN-NEXT:    vmv8r.v v24, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a2, a2, 3 | 
|  | ; ZVFHMIN-NEXT:    add a2, sp, a2 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v6 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v12, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v6 | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a2, a2, 3 | 
|  | ; ZVFHMIN-NEXT:    add a2, sp, a2 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2 | 
|  | ; ZVFHMIN-NEXT:  # %bb.1: | 
|  | ; ZVFHMIN-NEXT:    mv a0, a1 | 
|  | ; ZVFHMIN-NEXT:  .LBB22_2: | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    addi a1, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24, v0.t | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 3 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 3 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v24, v24, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v8, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v8 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 3 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    li a1, 24 | 
|  | ; ZVFHMIN-NEXT:    mul a0, a0, a1 | 
|  | ; ZVFHMIN-NEXT:    add sp, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) | 
|  | ret <vscale x 32 x half> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, i32 zeroext %evl) { | 
|  | ; ZVFH-LABEL: vfmin_vv_nxv32f16_unmasked: | 
|  | ; ZVFH:       # %bb.0: | 
|  | ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
|  | ; ZVFH-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFH-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; ZVFH-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFH-NEXT:    vmerge.vvm v8, v16, v8, v0 | 
|  | ; ZVFH-NEXT:    vfmin.vv v8, v8, v24 | 
|  | ; ZVFH-NEXT:    ret | 
|  | ; | 
|  | ; ZVFHMIN-LABEL: vfmin_vv_nxv32f16_unmasked: | 
|  | ; ZVFHMIN:       # %bb.0: | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, -16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    li a2, 24 | 
|  | ; ZVFHMIN-NEXT:    mul a1, a1, a2 | 
|  | ; ZVFHMIN-NEXT:    sub sp, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a1, a1, 4 | 
|  | ; ZVFHMIN-NEXT:    add a1, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    addi a1, a1, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmset.m v16 | 
|  | ; ZVFHMIN-NEXT:    slli a1, a2, 1 | 
|  | ; ZVFHMIN-NEXT:    srli a2, a2, 2 | 
|  | ; ZVFHMIN-NEXT:    sub a3, a0, a1 | 
|  | ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2 | 
|  | ; ZVFHMIN-NEXT:    sltu a2, a0, a3 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, -1 | 
|  | ; ZVFHMIN-NEXT:    and a2, a2, a3 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    csrr a3, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a3, a3, 3 | 
|  | ; ZVFHMIN-NEXT:    add a3, sp, a3 | 
|  | ; ZVFHMIN-NEXT:    addi a3, a3, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a3) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v6, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv8r.v v8, v16 | 
|  | ; ZVFHMIN-NEXT:    csrr a2, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a2, a2, 4 | 
|  | ; ZVFHMIN-NEXT:    add a2, sp, a2 | 
|  | ; ZVFHMIN-NEXT:    addi a2, a2, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v6 | 
|  | ; ZVFHMIN-NEXT:    vmv8r.v v24, v8 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    addi a2, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v12, v16, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v12 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v16, v24, v0 | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v8, v0.t | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2 | 
|  | ; ZVFHMIN-NEXT:  # %bb.1: | 
|  | ; ZVFHMIN-NEXT:    mv a0, a1 | 
|  | ; ZVFHMIN-NEXT:  .LBB23_2: | 
|  | ; ZVFHMIN-NEXT:    csrr a1, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a1, a1, 3 | 
|  | ; ZVFHMIN-NEXT:    add a1, sp, a1 | 
|  | ; ZVFHMIN-NEXT:    addi a1, a1, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v0 | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; ZVFHMIN-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; ZVFHMIN-NEXT:    vmv1r.v v0, v7 | 
|  | ; ZVFHMIN-NEXT:    vmerge.vvm v16, v16, v8, v0 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    slli a0, a0, 4 | 
|  | ; ZVFHMIN-NEXT:    add a0, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    addi a0, a0, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24 | 
|  | ; ZVFHMIN-NEXT:    addi a0, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload | 
|  | ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma | 
|  | ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16 | 
|  | ; ZVFHMIN-NEXT:    csrr a0, vlenb | 
|  | ; ZVFHMIN-NEXT:    li a1, 24 | 
|  | ; ZVFHMIN-NEXT:    mul a0, a0, a1 | 
|  | ; ZVFHMIN-NEXT:    add sp, sp, a0 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; ZVFHMIN-NEXT:    addi sp, sp, 16 | 
|  | ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; ZVFHMIN-NEXT:    ret | 
|  | %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 32 x half> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) | 
|  |  | 
|  | define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1f32: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) | 
|  | ret <vscale x 1 x float> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 1 x float> @vfmin_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1f32_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 1 x float> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) | 
|  |  | 
|  | define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2f32: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) | 
|  | ret <vscale x 2 x float> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 2 x float> @vfmin_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2f32_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 2 x float> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) | 
|  |  | 
|  | define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4f32: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v12, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v13, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v13 | 
|  | ; CHECK-NEXT:    vmerge.vvm v14, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vmfeq.vv v13, v10, v10, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v13 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v14, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) | 
|  | ret <vscale x 4 x float> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 4 x float> @vfmin_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4f32_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v12, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v12 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 4 x float> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) | 
|  |  | 
|  | define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8f32: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v16, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v17, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v17 | 
|  | ; CHECK-NEXT:    vmerge.vvm v20, v8, v12, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v16 | 
|  | ; CHECK-NEXT:    vmfeq.vv v17, v12, v12, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v17 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v16 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v20, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) | 
|  | ret <vscale x 8 x float> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 8 x float> @vfmin_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8f32_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v8, v12, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v16 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 8 x float> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) | 
|  |  | 
|  | define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1f64: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v10, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v11, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9, v0.t | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v10 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v11, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) | 
|  | ret <vscale x 1 x double> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 1 x double> @vfmin_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv1f64_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v10, v8, v9, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v9, v9 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v10 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 1 x double> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) | 
|  |  | 
|  | define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2f64: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v12, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v13, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v13 | 
|  | ; CHECK-NEXT:    vmerge.vvm v14, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vmfeq.vv v13, v10, v10, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v13 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v12 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v14, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) | 
|  | ret <vscale x 2 x double> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 2 x double> @vfmin_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv2f64_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v12, v8, v10, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v10, v10 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v12 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 2 x double> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) | 
|  |  | 
|  | define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4f64: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v16, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v17, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v17 | 
|  | ; CHECK-NEXT:    vmerge.vvm v20, v8, v12, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v16 | 
|  | ; CHECK-NEXT:    vmfeq.vv v17, v12, v12, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v17 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v16 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v20, v0.t | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) | 
|  | ret <vscale x 4 x double> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 4 x double> @vfmin_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv4f64_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v8, v12, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v12, v12 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v16 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 4 x double> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) | 
|  |  | 
|  | define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8f64: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 3 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v7, v0 | 
|  | ; CHECK-NEXT:    vmfeq.vv v25, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v25 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmfeq.vv v25, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v25 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) | 
|  | ret <vscale x 8 x double> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv8f64_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v8, v8 | 
|  | ; CHECK-NEXT:    vmfeq.vv v7, v16, v16 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v24 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 8 x double> %v | 
|  | } | 
|  |  | 
|  | declare <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double>, <vscale x 16 x double>, <vscale x 16 x i1>, i32) | 
|  |  | 
|  | define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv16f64: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    li a3, 24 | 
|  | ; CHECK-NEXT:    mul a1, a1, a3 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma | 
|  | ; CHECK-NEXT:    vmv1r.v v7, v0 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 4 | 
|  | ; CHECK-NEXT:    add a1, sp, a1 | 
|  | ; CHECK-NEXT:    addi a1, a1, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a3, a1, 3 | 
|  | ; CHECK-NEXT:    srli a4, a1, 3 | 
|  | ; CHECK-NEXT:    vslidedown.vx v6, v0, a4 | 
|  | ; CHECK-NEXT:    sub a4, a2, a1 | 
|  | ; CHECK-NEXT:    add a3, a0, a3 | 
|  | ; CHECK-NEXT:    vl8re64.v v8, (a3) | 
|  | ; CHECK-NEXT:    sltu a3, a2, a4 | 
|  | ; CHECK-NEXT:    addi a3, a3, -1 | 
|  | ; CHECK-NEXT:    and a3, a3, a4 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v26, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v26 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v16, v8, v0 | 
|  | ; CHECK-NEXT:    csrr a3, vlenb | 
|  | ; CHECK-NEXT:    slli a3, a3, 3 | 
|  | ; CHECK-NEXT:    add a3, sp, a3 | 
|  | ; CHECK-NEXT:    addi a3, a3, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v24, (a3) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv8r.v v24, v16 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    vmfeq.vv v5, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vl8re64.v v16, (a0) | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v5 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v6 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v24, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    bltu a2, a1, .LBB40_2 | 
|  | ; CHECK-NEXT:  # %bb.1: | 
|  | ; CHECK-NEXT:    mv a2, a1 | 
|  | ; CHECK-NEXT:  .LBB40_2: | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v25, v8, v8, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v25 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v8, v16, v0 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmfeq.vv v25, v16, v16, v0.t | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v25 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v16, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v16, v8, v0.t | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    li a1, 24 | 
|  | ; CHECK-NEXT:    mul a0, a0, a1 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl) | 
|  | ret <vscale x 16 x double> %v | 
|  | } | 
|  |  | 
|  | define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, i32 zeroext %evl) { | 
|  | ; CHECK-LABEL: vfmin_vv_nxv16f64_unmasked: | 
|  | ; CHECK:       # %bb.0: | 
|  | ; CHECK-NEXT:    addi sp, sp, -16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 16 | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    li a3, 24 | 
|  | ; CHECK-NEXT:    mul a1, a1, a3 | 
|  | ; CHECK-NEXT:    sub sp, sp, a1 | 
|  | ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a1, a1, 4 | 
|  | ; CHECK-NEXT:    add a1, sp, a1 | 
|  | ; CHECK-NEXT:    addi a1, a1, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    csrr a1, vlenb | 
|  | ; CHECK-NEXT:    slli a3, a1, 3 | 
|  | ; CHECK-NEXT:    sub a4, a2, a1 | 
|  | ; CHECK-NEXT:    add a3, a0, a3 | 
|  | ; CHECK-NEXT:    vl8re64.v v24, (a3) | 
|  | ; CHECK-NEXT:    sltu a3, a2, a4 | 
|  | ; CHECK-NEXT:    addi a3, a3, -1 | 
|  | ; CHECK-NEXT:    and a3, a3, a4 | 
|  | ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v16, v16 | 
|  | ; CHECK-NEXT:    vmfeq.vv v7, v24, v24 | 
|  | ; CHECK-NEXT:    vl8re64.v v8, (a0) | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v16, v24, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmerge.vvm v16, v24, v16, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v16, v8 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill | 
|  | ; CHECK-NEXT:    bltu a2, a1, .LBB41_2 | 
|  | ; CHECK-NEXT:  # %bb.1: | 
|  | ; CHECK-NEXT:    mv a2, a1 | 
|  | ; CHECK-NEXT:  .LBB41_2: | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 4 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma | 
|  | ; CHECK-NEXT:    vmfeq.vv v0, v16, v16 | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    slli a0, a0, 3 | 
|  | ; CHECK-NEXT:    add a0, sp, a0 | 
|  | ; CHECK-NEXT:    addi a0, a0, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    vmfeq.vv v7, v8, v8 | 
|  | ; CHECK-NEXT:    vmerge.vvm v24, v16, v8, v0 | 
|  | ; CHECK-NEXT:    vmv1r.v v0, v7 | 
|  | ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0 | 
|  | ; CHECK-NEXT:    vfmin.vv v8, v8, v24 | 
|  | ; CHECK-NEXT:    addi a0, sp, 16 | 
|  | ; CHECK-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload | 
|  | ; CHECK-NEXT:    csrr a0, vlenb | 
|  | ; CHECK-NEXT:    li a1, 24 | 
|  | ; CHECK-NEXT:    mul a0, a0, a1 | 
|  | ; CHECK-NEXT:    add sp, sp, a0 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa sp, 16 | 
|  | ; CHECK-NEXT:    addi sp, sp, 16 | 
|  | ; CHECK-NEXT:    .cfi_def_cfa_offset 0 | 
|  | ; CHECK-NEXT:    ret | 
|  | %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) | 
|  | ret <vscale x 16 x double> %v | 
|  | } |