| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ |
| ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s |
| ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ |
| ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s |
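
; This file checks codegen for the llvm.riscv.vmfgt intrinsics on scalable
; bfloat vectors under the experimental zvfbfa extension, covering nxv1bf16
; through nxv16bf16 in vector-vector and vector-scalar forms, both unmasked
; and masked. RVV has no vmfgt.vv encoding, so the vector-vector form is
; expected to select vmflt.vv with the operands swapped, while the
; vector-scalar form selects vmfgt.vf directly.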
| |
| define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16( |
| <vscale x 1 x bfloat> %0, |
| <vscale x 1 x bfloat> %1, |
| iXLen %2) |
| |
| ret <vscale x 1 x i1> %a |
| } |
| |
| define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu |
| ; CHECK-NEXT: vmv1r.v v11, v0 |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: ret |
| entry: |
| %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16( |
| <vscale x 1 x bfloat> %1, |
| <vscale x 1 x bfloat> %2, |
| iXLen %4) |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16( |
| <vscale x 1 x i1> %0, |
| <vscale x 1 x bfloat> %2, |
| <vscale x 1 x bfloat> %3, |
| <vscale x 1 x i1> %mask, |
| iXLen %4) |
| |
| ret <vscale x 1 x i1> %a |
| } |
| |
| define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16( |
| <vscale x 2 x bfloat> %0, |
| <vscale x 2 x bfloat> %1, |
| iXLen %2) |
| |
| ret <vscale x 2 x i1> %a |
| } |
| |
| define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu |
| ; CHECK-NEXT: vmv1r.v v11, v0 |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: ret |
| entry: |
| %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16( |
| <vscale x 2 x bfloat> %1, |
| <vscale x 2 x bfloat> %2, |
| iXLen %4) |
| %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16( |
| <vscale x 2 x i1> %0, |
| <vscale x 2 x bfloat> %2, |
| <vscale x 2 x bfloat> %3, |
| <vscale x 2 x i1> %mask, |
| iXLen %4) |
| |
| ret <vscale x 2 x i1> %a |
| } |
| |
| define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16( |
| <vscale x 4 x bfloat> %0, |
| <vscale x 4 x bfloat> %1, |
| iXLen %2) |
| |
| ret <vscale x 4 x i1> %a |
| } |
| |
| define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu |
| ; CHECK-NEXT: vmv1r.v v11, v0 |
| ; CHECK-NEXT: vmflt.vv v0, v9, v8 |
| ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t |
| ; CHECK-NEXT: vmv.v.v v0, v11 |
| ; CHECK-NEXT: ret |
| entry: |
| %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16( |
| <vscale x 4 x bfloat> %1, |
| <vscale x 4 x bfloat> %2, |
| iXLen %4) |
| %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16( |
| <vscale x 4 x i1> %0, |
| <vscale x 4 x bfloat> %2, |
| <vscale x 4 x bfloat> %3, |
| <vscale x 4 x i1> %mask, |
| iXLen %4) |
| |
| ret <vscale x 4 x i1> %a |
| } |
| |
| define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma |
| ; CHECK-NEXT: vmflt.vv v0, v10, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16( |
| <vscale x 8 x bfloat> %0, |
| <vscale x 8 x bfloat> %1, |
| iXLen %2) |
| |
| ret <vscale x 8 x i1> %a |
| } |
| |
| define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu |
| ; CHECK-NEXT: vmv1r.v v14, v0 |
| ; CHECK-NEXT: vmflt.vv v0, v10, v8 |
| ; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v14 |
| ; CHECK-NEXT: ret |
| entry: |
| %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16( |
| <vscale x 8 x bfloat> %1, |
| <vscale x 8 x bfloat> %2, |
| iXLen %4) |
| %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16( |
| <vscale x 8 x i1> %0, |
| <vscale x 8 x bfloat> %2, |
| <vscale x 8 x bfloat> %3, |
| <vscale x 8 x i1> %mask, |
| iXLen %4) |
| |
| ret <vscale x 8 x i1> %a |
| } |
| |
| define <vscale x 16 x i1> @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma |
| ; CHECK-NEXT: vmflt.vv v0, v12, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16( |
| <vscale x 16 x bfloat> %0, |
| <vscale x 16 x bfloat> %1, |
| iXLen %2) |
| |
| ret <vscale x 16 x i1> %a |
| } |
| |
| define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu |
| ; CHECK-NEXT: vmv1r.v v20, v0 |
| ; CHECK-NEXT: vmflt.vv v0, v12, v8 |
| ; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v20 |
| ; CHECK-NEXT: ret |
| entry: |
| %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16( |
| <vscale x 16 x bfloat> %1, |
| <vscale x 16 x bfloat> %2, |
| iXLen %4) |
| %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16( |
| <vscale x 16 x i1> %0, |
| <vscale x 16 x bfloat> %2, |
| <vscale x 16 x bfloat> %3, |
| <vscale x 16 x i1> %mask, |
| iXLen %4) |
| |
| ret <vscale x 16 x i1> %a |
| } |
| |
| define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16( |
| <vscale x 1 x bfloat> %0, |
| bfloat %1, |
| iXLen %2) |
| |
| ret <vscale x 1 x i1> %a |
| } |
| |
| define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( |
| <vscale x 1 x i1> %0, |
| <vscale x 1 x bfloat> %1, |
| bfloat %2, |
| <vscale x 1 x i1> %3, |
| iXLen %4) |
| |
| ret <vscale x 1 x i1> %a |
| } |
| |
| define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16( |
| <vscale x 2 x bfloat> %0, |
| bfloat %1, |
| iXLen %2) |
| |
| ret <vscale x 2 x i1> %a |
| } |
| |
| define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( |
| <vscale x 2 x i1> %0, |
| <vscale x 2 x bfloat> %1, |
| bfloat %2, |
| <vscale x 2 x i1> %3, |
| iXLen %4) |
| |
| ret <vscale x 2 x i1> %a |
| } |
| |
| define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16( |
| <vscale x 4 x bfloat> %0, |
| bfloat %1, |
| iXLen %2) |
| |
| ret <vscale x 4 x i1> %a |
| } |
| |
| define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv.v.v v0, v10 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( |
| <vscale x 4 x i1> %0, |
| <vscale x 4 x bfloat> %1, |
| bfloat %2, |
| <vscale x 4 x i1> %3, |
| iXLen %4) |
| |
| ret <vscale x 4 x i1> %a |
| } |
| |
| define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16( |
| <vscale x 8 x bfloat> %0, |
| bfloat %1, |
| iXLen %2) |
| |
| ret <vscale x 8 x i1> %a |
| } |
| |
| define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu |
| ; CHECK-NEXT: vmv1r.v v11, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v11 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( |
| <vscale x 8 x i1> %0, |
| <vscale x 8 x bfloat> %1, |
| bfloat %2, |
| <vscale x 8 x i1> %3, |
| iXLen %4) |
| |
| ret <vscale x 8 x i1> %a |
| } |
| |
| define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16( |
| <vscale x 16 x bfloat> %0, |
| bfloat %1, |
| iXLen %2) |
| |
| ret <vscale x 16 x i1> %a |
| } |
| |
| define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { |
| ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu |
| ; CHECK-NEXT: vmv1r.v v13, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v12 |
| ; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v13 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( |
| <vscale x 16 x i1> %0, |
| <vscale x 16 x bfloat> %1, |
| bfloat %2, |
| <vscale x 16 x i1> %3, |
| iXLen %4) |
| |
| ret <vscale x 16 x i1> %a |
| } |
| |