| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvbc,+zvfbfwma -verify-machineinstrs | FileCheck %s |
| ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvbc,+zvfbfwma -verify-machineinstrs | FileCheck %s |
| |
; The purpose of this file is to check the behavior of specific instructions as it relates to the VL optimizer.
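;
; Each test pairs a producer instruction whose VL operand is VLMAX (iXLen -1)
; with a consumer that runs at the smaller runtime VL in %vl. When the VL
; optimizer shrinks the producer's VL to match its only user, a single
; vsetvli seeded from a0 covers both instructions; without the optimization
; an extra vsetvli for VLMAX would be needed. For example, in @vadd_vi below
; both vadd instructions execute under "vsetvli zero, a0, e32, m2, ta, ma".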
| |
| define <vscale x 4 x i32> @vadd_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vadd_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vadd_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsub.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vsub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsub.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrsub_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vrsub_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrsub.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vrsub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vrsub.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vand_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vand_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vand.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vand_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vand_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vand.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vand_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vand_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vand.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vor_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vor_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vor.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vor_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vor_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vor.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vor_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vor_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vor.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vxor_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vxor_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vxor.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vxor_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vxor_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vxor.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vxor_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vxor_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vxor.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
%1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsll_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vsll_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsll.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
%1 = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1)
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsll_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsll_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsll.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsll_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vsll_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsll.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
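; Widening ops produce a result with EEW = 2*SEW, so the consuming vadd runs
; at e64/m4. The second vsetvli is a vtype-only change ("vsetvli zero, zero"):
; the VL picked for the widening op is already the consumer's VL.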
| define <vscale x 4 x i64> @vwaddu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwaddu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwaddu.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsrl_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vsrl_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsrl.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsrl_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsrl_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsrl.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsrl_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vsrl_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsrl.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsra_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vsra_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsra.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsra_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsra_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsra.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsra_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vsra_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsra.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwaddu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwaddu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwaddu.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsubu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsubu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsubu.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsubu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsubu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsubu.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwadd.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwadd_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwadd.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsub.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsub.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwaddu_wv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwaddu_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwaddu.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwaddu_wx(<vscale x 4 x i64> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwaddu_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwaddu.wx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
%1 = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, i32 %b, iXLen -1)
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsubu_wv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsubu_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsubu.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsubu_wx(<vscale x 4 x i64> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsubu_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsubu.wx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwadd_wv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwadd_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwadd.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwadd_wx(<vscale x 4 x i64> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwadd_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwadd.wx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsub_wv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsub_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsub.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwsub_wx(<vscale x 4 x i64> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsub_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwsub.wx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsext_vf2(<vscale x 4 x i16> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsext_vf2: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsext.vf2 v12, v8 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsext_vf4(<vscale x 4 x i8> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsext_vf4: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsext.vf4 v12, v8 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(<vscale x 4 x i32> poison, <vscale x 4 x i8> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i64> @vsext_vf8(<vscale x 4 x i8> %a, <vscale x 4 x i64> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsext_vf8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vsext.vf8 v16, v8 |
| ; CHECK-NEXT: vadd.vv v8, v16, v12 |
| ; CHECK-NEXT: ret |
%1 = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(<vscale x 4 x i64> poison, <vscale x 4 x i8> %a, iXLen -1)
%2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %b, iXLen %vl)
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i32> @vzext_vf2(<vscale x 4 x i16> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vzext_vf2: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vzext.vf2 v12, v8 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vzext_vf4(<vscale x 4 x i8> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vzext_vf4: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vzext.vf4 v12, v8 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(<vscale x 4 x i32> poison, <vscale x 4 x i8> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i64> @vzext_vf8(<vscale x 4 x i8> %a, <vscale x 4 x i64> %b, iXLen %vl) { |
| ; CHECK-LABEL: vzext_vf8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma |
| ; CHECK-NEXT: vzext.vf8 v16, v8 |
| ; CHECK-NEXT: vadd.vv v8, v16, v12 |
| ; CHECK-NEXT: ret |
%1 = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(<vscale x 4 x i64> poison, <vscale x 4 x i8> %a, iXLen -1)
%2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %b, iXLen %vl)
| ret <vscale x 4 x i64> %2 |
| } |
| |
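; vmadc/vmsbc produce a mask result (the carry-out/borrow-out). The consumer
; here is vmand.mm, which runs at the same VL, so one vsetvli should suffice.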
| define <vscale x 4 x i1> @vmadc_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmadc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmadc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmadc_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vim: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vim v11, v8, 5, v0 |
| ; CHECK-NEXT: vmand.mm v0, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmadc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vxm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vxm v11, v8, a0, v0 |
| ; CHECK-NEXT: vmand.mm v0, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmadc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmadc_vvm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmadc.vvm v11, v8, v12, v0 |
| ; CHECK-NEXT: vmand.mm v0, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsbc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsbc_vvm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsbc.vvm v11, v8, v12, v0 |
| ; CHECK-NEXT: vmand.mm v0, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsbc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsbc_vxm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsbc.vxm v11, v8, a0, v0 |
| ; CHECK-NEXT: vmand.mm v0, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsbc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsbc_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsbc.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsbc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsbc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsbc.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
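; Narrowing shifts read a source operand with EEW = 2*SEW and write an
; SEW-wide result; the consumer here runs at e16/m1 with the wide source in
; v8m2.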
| define <vscale x 4 x i16> @vnsrl_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnsrl_wi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wi v11, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
| define <vscale x 4 x i16> @vnsrl_wx(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %c, iXLen %vl) { |
| ; CHECK-LABEL: vnsrl_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wx v11, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen %c, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
| define <vscale x 4 x i16> @vnsrl_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vnsrl_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsrl.wv v12, v8, v11 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, <vscale x 4 x i16> %c, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
| define <vscale x 4 x i16> @vnsra_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnsra_wi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsra.wi v11, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
| define <vscale x 4 x i16> @vnsra_wx(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %c, iXLen %vl) { |
| ; CHECK-LABEL: vnsra_wx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsra.wx v11, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v11, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen %c, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
| define <vscale x 4 x i16> @vnsra_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vnsra_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vnsra.wv v12, v8, v11 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, <vscale x 4 x i16> %c, iXLen -1) |
| %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl) |
| ret <vscale x 4 x i16> %2 |
| } |
| |
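; Integer compares also produce mask results; as with vmadc/vmsbc above, the
; vmand.mm consumer should let the compare inherit the smaller VL in a0.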
| define <vscale x 4 x i1> @vmseq_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmseq_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmseq.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmseq_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmseq_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmseq.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmseq_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmseq_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmseq.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsne_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmsne_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsne.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsne_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsne_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsne.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsne_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsne_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsne.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsltu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsltu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsltu.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsltu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsltu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsltu.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmslt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmslt_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmslt.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmslt_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmslt_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmslt.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsleu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmsleu_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsleu.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsleu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsleu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsleu.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsleu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsleu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsleu.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsle_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmsle_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsle.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsle_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsle_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsle.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsle_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsle_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsle.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsgtu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmsgtu_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsgtu.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsgtu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsgtu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsgtu.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsgt_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmsgt_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsgt.vi v10, v8, 5 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmsgt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsgt_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmsgt.vx v10, v8, a0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
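; Single-width min/max and multiply ops follow the same producer/consumer
; pattern as the add/sub tests at the top of the file.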
| define <vscale x 4 x i32> @vminu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vminu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vminu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vminu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vminu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vminu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmin_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmin_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmin.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmin_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmin_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmin.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmaxu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmaxu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmaxu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmaxu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmaxu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmaxu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmax_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmax_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmax.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmax_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmax_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmax.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmul_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmul.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmul_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmul_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmul.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulh_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulh_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulh.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulh_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulh_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulh.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulhu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulhu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulhu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulhu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulhu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulhu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulhsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulhsu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulhsu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmulhsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmulhsu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmulhsu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vdivu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vdivu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vdivu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vdivu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vdivu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vdivu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vdiv_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vdiv_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vdiv.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vdiv_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vdiv_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vdiv.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vremu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vremu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vremu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vremu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vremu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vremu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrem_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrem_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrem.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrem_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vrem_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vrem.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
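| ; Widening ops write a 2*SEW result, so the consumer below runs at twice the |
| ; SEW/LMUL of the producer; the second vsetvli uses the zero, zero form to |
| ; change SEW/LMUL while keeping the VL set by the first. |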
| define <vscale x 4 x i64> @vwmul_vv(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vwmul.vv v12, v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmul.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwmul_vx(<vscale x 4 x i16> %a, i16 %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vwmul_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma |
| ; CHECK-NEXT: vwmul.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmul.vx v8, v12, a1 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, i16 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %1, i32 %c, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwmulsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwmulsu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwmulsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwmulsu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwmulu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwmulu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmulu.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
| define <vscale x 4 x i64> @vwmulu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vwmulu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vwmulu.vx v12, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl) |
| ret <vscale x 4 x i64> %2 |
| } |
| |
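| ; The multiply-accumulate intrinsics tie the destination to the accumulator |
| ; and take a policy operand of 0 (tail undisturbed), hence the tu in the |
| ; vsetvli for the first instruction. |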
| define <vscale x 4 x i32> @vwmacc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, iXLen %vl) { |
| ; CHECK-LABEL: vwmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmacc.vv v8, v10, v11 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmacc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vmacc.vv v8, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmacc_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmacc_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmacc.vx v10, a0, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %a, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vmadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vmadd.vv v8, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vmadd_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmadd.vx v10, a0, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %a, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnmsac_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnmsac_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vnmsac.vv v8, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnmsac_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vnmsac_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vnmsac.vx v10, a0, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %a, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnmsub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnmsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vnmsub.vv v8, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnmsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vnmsub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vnmsub.vx v10, a0, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %a, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vwmacc_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmacc.vx v8, a0, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmaccu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, iXLen %vl) { |
| ; CHECK-LABEL: vwmaccu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmaccu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, i32 %e, iXLen %vl) { |
| ; CHECK-LABEL: vwmaccu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a2, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmaccsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vwmaccsu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmaccsu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vwmaccsu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwmaccus_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) { |
| ; CHECK-LABEL: vwmaccus_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma |
| ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
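| ; Saturating adds and subs clamp instead of wrapping but are still plain |
| ; single-width binary ops, so the producer's VLMAX vl shrinks to the |
| ; consumer's. |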
| define <vscale x 4 x i32> @vsaddu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsaddu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsaddu.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsaddu(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsaddu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vsaddu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsaddu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsaddu(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsaddu_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vsaddu_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsaddu.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsaddu(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsadd.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vsadd_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsadd.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsadd_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vsadd_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsadd.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssubu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vssubu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssubu.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssubu(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssubu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vssubu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vssubu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssubu(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vssub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssub.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssub(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vssub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vssub.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssub(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
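| ; The fixed-point ops take a static rounding-mode operand (0 = rnu), which |
| ; codegen materializes with csrwi vxrm, 0. |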
| define <vscale x 4 x i32> @vsmul_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vsmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsmul.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsmul_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vsmul_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsmul.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssrl_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vssrl_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssrl.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssrl_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vssrl_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vssrl.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssrl_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vssrl_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssrl.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssra_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vssra_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssra.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssra_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vssra_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vssra.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vssra_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vssra_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vssra.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
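| ; Narrowing clips read a 2*SEW source (the .wv/.wx/.wi forms) and write a |
| ; single-width result, so one e32, m2 vsetvli covers producer and consumer. |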
| define <vscale x 4 x i32> @vnclipu_vv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnclipu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclipu.wv v14, v8, v12 |
| ; CHECK-NEXT: vadd.vv v8, v14, v14 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclipu(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnclipu_vx(<vscale x 4 x i64> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vnclipu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclipu.wx v12, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclipu(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, iXLen %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnclipu_vi(<vscale x 4 x i64> %a, iXLen %vl) { |
| ; CHECK-LABEL: vnclipu_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclipu.wi v12, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclipu(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, iXLen 5, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnclip_vv(<vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vnclip_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclip.wv v14, v8, v12 |
| ; CHECK-NEXT: vadd.vv v8, v14, v14 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclip(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnclip_vx(<vscale x 4 x i64> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vnclip_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclip.wx v12, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclip(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, iXLen %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vnclip_vi(<vscale x 4 x i64> %a, iXLen %vl) { |
| ; CHECK-LABEL: vnclip_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vnclip.wi v12, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vnclip(<vscale x 4 x i32> poison, <vscale x 4 x i64> %a, iXLen 5, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
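| ; Splatting the constant 5 through the vmv.v.x intrinsic folds to vmv.v.i. |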
| define <vscale x 4 x i32> @vmv_v_i(<vscale x 4 x i32> %a, i32 %x, iXLen %vl) { |
| ; CHECK-LABEL: vmv_v_i: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(<vscale x 4 x i32> poison, i32 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmv_v_x(<vscale x 4 x i32> %a, i32 %x, iXLen %vl) { |
| ; CHECK-LABEL: vmv_v_x: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.x v10, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(<vscale x 4 x i32> poison, i32 %x, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| ; The vmv.v.v would be optimized away entirely if its user were a vadd, so |
| ; use a vmerge as the user to keep it around. |
| define <vscale x 1 x i8> @vmv_v_v(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl) { |
| ; CHECK-LABEL: vmv_v_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma |
| ; CHECK-NEXT: vmv.v.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 |
| ; CHECK-NEXT: ret |
| %2 = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1) |
| %3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %2, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl) |
| ret <vscale x 1 x i8> %3 |
| } |
| |
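| ; vwsll (from Zvbb) widens like vwmul above, so its user is checked after a |
| ; SEW/LMUL-only vsetvli that preserves the VL. |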
| define <vscale x 4 x i32> @vwsll_vi(<vscale x 4 x i16> %a, iXLen %vl) { |
| ; CHECK-LABEL: vwsll_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vwsll.vi v10, v8, 1 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen 1, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwsll_vx(<vscale x 4 x i16> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsll_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma |
| ; CHECK-NEXT: vwsll.vx v10, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vwsll_vv(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) { |
| ; CHECK-LABEL: vwsll_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| ; CHECK-NEXT: vwsll.vv v10, v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
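| ; For the mask-register logical ops the producer's VLMAX vl shrinks to %vl, |
| ; so both .mm instructions run under the single vsetvli with a0. |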
| define <vscale x 1 x i32> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmand_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmand.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmnand_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmnand.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmandn_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmandn.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmxor_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxor.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmor_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmor.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmnor_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmnor.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmorn_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmorn.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmxnor_mm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmxnor.mm v8, v0, v8 |
| ; CHECK-NEXT: vmand.mm v0, v0, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmsbf_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsbf_m: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmsbf.m v9, v0 |
| ; CHECK-NEXT: vmand.mm v0, v0, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(<vscale x 1 x i1> %a, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmsif_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsif_m: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmsif.m v9, v0 |
| ; CHECK-NEXT: vmand.mm v0, v0, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
| define <vscale x 1 x i32> @vmsof_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmsof_m: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmsof.m v9, v0 |
| ; CHECK-NEXT: vmand.mm v0, v0, v9 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1) |
| %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl) |
| %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0) |
| ret <vscale x 1 x i32> %3 |
| } |
| |
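| ; Element i of viota.m and vid.v depends only on indices below i, so their |
| ; vl can likewise be reduced to the consumer's. |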
| define <vscale x 4 x i32> @viota_m(<vscale x 4 x i1> %a, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: viota_m: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: viota.m v10, v0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %c, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vid.v(<vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vid.v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vid.v v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(<vscale x 4 x i32> poison, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %c, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
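| ; For the slides, each result element below the new vl comes from a fixed |
| ; source element (or the scalar, for vslide1up), so reducing vl is safe; |
| ; vslide1down below is the exception. |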
| define <vscale x 4 x i32> @vslideup_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vslideup_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslideup(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vslideup_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vslideup_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vslideup.vi v10, v8, 2 |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslideup(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 2, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vslidedown_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vslidedown_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v8, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslidedown(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vslidedown_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vslidedown_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v8, v8, 2 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslidedown(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 2, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vslide1up_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vslide1up_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vslide1up.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1up(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x float> @vfslide1up_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfslide1up_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1up(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| ; Negative test - not safe to reduce vl: vslide1down writes the scalar into |
| ; element vl-1, so elements inside a smaller vl would change (see the sketch |
| ; after this test). |
| |
| define <vscale x 4 x i32> @vslide1down_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vslide1down_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vslide1down.vx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1down(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
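| |
| ; A minimal worked example, assuming the spec'd vslide1down semantics |
| ; (vd[i] = vs2[i+1] for i < vl-1, vd[vl-1] = rs1): with vs2 = {a,b,c,d} the |
| ; result at vl=4 is {b,c,d,x} but at vl=2 it is {b,x}, so even element 1 |
| ; within the smaller vl differs and the VLMAX (-1) result cannot simply be |
| ; reused at a reduced vl. |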
| |
| ; Negative test - not safe to reduce vl, for the same reason as vslide1down |
| ; above: vfslide1down writes the scalar into element vl-1. |
| |
| define <vscale x 4 x float> @vfslide1down_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfslide1down_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1down(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
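| |
| ; In the floating-point tests, the iXLen 7 operand ahead of the vl is the frm |
| ; argument; assuming the usual encoding where 7 means DYN (keep the dynamic |
| ; rounding mode already in frm), no fsrmi/fsrm swap is expected in the checks. |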
| |
| define <vscale x 4 x float> @vfadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfadd_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfadd_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfadd.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsub_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsub.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsub_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsub_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsub.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfrsub_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfrsub_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfrsub.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwadd.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwadd_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwadd_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwadd.vf v12, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwsub_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwsub.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwsub_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwsub_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwsub.vf v12, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwadd_wv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwadd_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwadd.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.nxv4f32(<vscale x 4 x double> poison, <vscale x 4 x double> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwadd_wf(<vscale x 4 x double> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwadd_wf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.f32(<vscale x 4 x double> poison, <vscale x 4 x double> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwsub_wv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwsub_wv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwsub.wv v8, v8, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.nxv4f32(<vscale x 4 x double> poison, <vscale x 4 x double> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwsub_wf(<vscale x 4 x double> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwsub_wf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.f32(<vscale x 4 x double> poison, <vscale x 4 x double> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmul_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmul.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmul_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmul_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmul.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfdiv_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfdiv_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfdiv.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfdiv_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfdiv_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfdiv.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfrdiv_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfrdiv_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfrdiv.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwmul_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwmul.vv v12, v8, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwmul_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfwmul_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfwmul.vf v12, v8, fa0 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v12, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(<vscale x 4 x double> poison, <vscale x 4 x float> %a, float %b, iXLen 7, iXLen -1) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %1, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfeq_vf(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, float %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfeq_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfeq.vf v10, v8, fa0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(<vscale x 4 x float> %a, float %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfeq_vv(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfeq_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfeq.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfne_vf(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, float %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfne_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfne.vf v10, v8, fa0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(<vscale x 4 x float> %a, float %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfne_vv(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfne_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfne.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmflt_vf(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, float %c, iXLen %vl) { |
| ; CHECK-LABEL: vmflt_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmflt.vf v10, v8, fa0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(<vscale x 4 x float> %a, float %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmflt_vv(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmflt_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmflt.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfle_vf(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, float %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfle_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfle.vf v10, v8, fa0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(<vscale x 4 x float> %a, float %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfle_vv(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfle_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfle.vv v12, v8, v10 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfgt_vf(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, float %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfgt_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 |
| ; CHECK-NEXT: vmand.mm v0, v10, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(<vscale x 4 x float> %a, float %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i1> @vmfgt_vv(<vscale x 4 x float> %a, <vscale x 4 x i1> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmfgt_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmflt.vv v12, v10, v8 |
| ; CHECK-NEXT: vmand.mm v0, v12, v0 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %c, iXLen -1) |
| %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl) |
| ret <vscale x 4 x i1> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmerge_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmerge_vvm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmerge_vxm(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmerge_vxm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vmerge_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vmerge_vim: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 9, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vadc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vadc_vvm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vadc_vxm(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vadc_vxm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vadc_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vadc_vim: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 9, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
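| |
| ; The averaging add/sub tests below take a vxrm rounding-mode operand of 0; |
| ; assuming the standard encoding where 0 is rnu (round-to-nearest-up), this |
| ; is why csrwi vxrm, 0 appears ahead of each vaadd/vasub in the checks. |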
| |
| define <vscale x 4 x i32> @vaadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vaadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vaadd.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vaadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vaadd_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vaadd.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vasub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vasub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vasub.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vasub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vasub_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vasub.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vaaddu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vaaddu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vaaddu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vaaddu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vaaddu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vaaddu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vasubu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vasubu_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vasubu.vv v8, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vasubu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vasubu_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: csrwi vxrm, 0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vasubu.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen 0, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmax_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmax_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmax.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmax_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmax_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmax.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmin_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmin_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmin.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmin_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmin_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmin.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnj_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnj_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnj_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnj_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnj.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnjn_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnjn_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnjn_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnjn_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnjn.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnjx_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnjx_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %b, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfsgnjx_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfsgnjx_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfsgnjx.vf v10, v8, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmerge_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x i1> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmerge_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmerge.vfm v10, v8, fa0, v0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmerge(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, <vscale x 4 x i1> %c, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmv_v_f(<vscale x 4 x float> %a, float %b, iXLen %vl) { |
| ; CHECK-LABEL: vfmv_v_f: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmv.v.f v10, fa0 |
| ; CHECK-NEXT: vfadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f(<vscale x 4 x float> poison, float %b, iXLen -1) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %a, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmacc_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmacc.vv v8, v12, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmacc(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmacc_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmacc_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmacc(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmacc_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmacc.vv v8, v12, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmacc(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmacc_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmacc_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmacc(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmsac_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmsac_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmsac.vv v8, v12, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmsac(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmsac_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmsac_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmsac(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmsac_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmsac_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmsac.vv v8, v12, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmsac(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmsac_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmsac_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmsac(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmadd.vv v8, v10, v12 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmadd(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmadd_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmadd_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmadd(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmadd_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmadd(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmadd_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmadd_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmadd(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmsub_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmsub.vv v8, v10, v12 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmsub(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfmsub_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfmsub_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfmsub(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmsub_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmsub_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmsub(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
| define <vscale x 4 x float> @vfnmsub_vf(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen %vl) { |
| ; CHECK-LABEL: vfnmsub_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 |
| ; CHECK-NEXT: vfadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfnmsub(<vscale x 4 x float> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 3) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
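| |
| ; The widening multiply-accumulate tests below pass a policy operand of 0 |
| ; (assuming the usual encoding, tail undisturbed), so their vsetvli uses tu |
| ; where the single-width FMA tests above (policy 3) use ta. |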
| |
| define <vscale x 4 x double> @vfwmacc_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwmacc_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmacc_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwnmacc_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwnmacc_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwnmacc_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwnmacc_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwmsac_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmsac_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwmsac_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmsac_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwnmsac_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwnmsac_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfwnmsac_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwnmsac_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma |
| ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 |
| ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x double> %2 |
| } |
| |
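| ; vfwmaccbf16 (Zvfbfwma) widens bf16 sources into an f32 accumulator, so the |
| ; vsetvli switches from e16, m1 to e32, m2 between the macc and the vfadd. |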
| define <vscale x 4 x float> @vfwmaccbf16_vv(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c, <vscale x 4 x float> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmaccbf16_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma |
| ; CHECK-NEXT: vfwmaccbf16.vv v8, v10, v11 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
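| ; vsbc takes v0 as a borrow-in operand rather than as a predicate mask and has |
| ; no masked form, so its VL can be reduced like any other unmasked op. |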
| define <vscale x 4 x i32> @vsbc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, iXLen %vl) { |
| ; CHECK-LABEL: vsbc_vvm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.nxv4i1(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %c, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vsbc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, i32 %c, iXLen %vl) { |
| ; CHECK-LABEL: vsbc_vxm: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.nxv4i1(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vfclass_v(<vscale x 4 x float> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vfclass_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vfclass.v v8, v8 |
| ; CHECK-NEXT: vadd.vv v8, v8, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x float> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
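| ; The destination of vrgather may not overlap its source or index registers, |
| ; which is why the gather result lands in v12 rather than reusing v8 or v10. |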
| define <vscale x 4 x i32> @vrgather_vi(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrgather_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrgather.vi v12, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrgather_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %idx, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrgather_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrgather.vv v12, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v12, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %idx, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrgather_vx(<vscale x 4 x i32> %a, iXLen %idx, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrgather_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vrgather.vx v12, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v12, v10 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %idx, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrgatherei16_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %idx, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrgatherei16_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v12, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i16> %idx, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
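| ; .vf companion to the vfwmaccbf16_vv test above; the bf16 scalar arrives in fa0. |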
| define <vscale x 4 x float> @vfwmaccbf16_vf(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c, <vscale x 4 x float> %d, iXLen %vl) { |
| ; CHECK-LABEL: vfwmaccbf16_vf: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma |
| ; CHECK-NEXT: vfwmaccbf16.vf v8, fa0, v10 |
| ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma |
| ; CHECK-NEXT: vfadd.vv v8, v8, v12 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0) |
| %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %d, iXLen 7, iXLen %vl) |
| ret <vscale x 4 x float> %2 |
| } |
| |
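| ; The next three tests use immediate VLs: the unary op is created with VL 7, but |
| ; its only user demands VL 6, so a single vsetivli of 6 should remain. fsrmi/fsrm |
| ; save and restore frm around the ops that take a static rounding mode. |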
| define <vscale x 4 x double> @vfsqrt(<vscale x 4 x float> %a) { |
| ; CHECK-LABEL: vfsqrt: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: fsrmi a0, 0 |
| ; CHECK-NEXT: vfsqrt.v v14, v8 |
| ; CHECK-NEXT: fsrm a0 |
| ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, iXLen 0, iXLen 7) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %1, iXLen 7, iXLen 6, iXLen 0) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfrsqrt7(<vscale x 4 x float> %a) { |
| ; CHECK-LABEL: vfrsqrt7: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vfrsqrt7.v v14, v8 |
| ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, iXLen 7) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %1, iXLen 7, iXLen 6, iXLen 0) |
| ret <vscale x 4 x double> %2 |
| } |
| |
| define <vscale x 4 x double> @vfrec7(<vscale x 4 x float> %a) { |
| ; CHECK-LABEL: vfrec7: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: fsrmi a0, 0 |
| ; CHECK-NEXT: vfrec7.v v14, v8 |
| ; CHECK-NEXT: fsrm a0 |
| ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, iXLen 0, iXLen 7) |
| %2 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %1, iXLen 7, iXLen 6, iXLen 0) |
| ret <vscale x 4 x double> %2 |
| } |
| |
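| ; Zvbb bit-manipulation ops follow: vandn, vbrev, vbrev8, vrev8, vclz, vcpop, |
| ; vctz, vror and vrol. |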
| define <vscale x 4 x i32> @vandn_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vandn_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vandn.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vandn_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) { |
| ; CHECK-LABEL: vandn_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vandn.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vbrev_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vbrev_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vbrev.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vbrev8_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vbrev8_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vbrev8.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrev8_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vrev8_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrev8.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vclz_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vclz_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vclz.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vclz.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vcpop_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vcpop_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vcpop.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vctz_v(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vctz_v: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vctz.v v10, v8 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vctz.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vror_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vror_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vror.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vror_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vror_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vror.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vror_vi(<vscale x 4 x i32> %a, iXLen %vl) { |
| ; CHECK-LABEL: vror_vi: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vror.vi v10, v8, 5 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrol_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) { |
| ; CHECK-LABEL: vrol_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma |
| ; CHECK-NEXT: vrol.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
| define <vscale x 4 x i32> @vrol_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vrol_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma |
| ; CHECK-NEXT: vrol.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl) |
| ret <vscale x 4 x i32> %2 |
| } |
| |
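| ; vclmul and vclmulh come from Zvbc and are only defined for SEW=64, hence the |
| ; e64, m2 configuration and <vscale x 2 x i64> operands. |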
| define <vscale x 2 x i64> @vclmul_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) { |
| ; CHECK-LABEL: vclmul_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma |
| ; CHECK-NEXT: vclmul.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1) |
| %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl) |
| ret <vscale x 2 x i64> %2 |
| } |
| |
| define <vscale x 2 x i64> @vclmul_vx(<vscale x 2 x i64> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vclmul_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma |
| ; CHECK-NEXT: vclmul.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.iXLen(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl) |
| ret <vscale x 2 x i64> %2 |
| } |
| |
| define <vscale x 2 x i64> @vclmulh_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) { |
| ; CHECK-LABEL: vclmulh_vv: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma |
| ; CHECK-NEXT: vclmulh.vv v10, v8, v10 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1) |
| %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl) |
| ret <vscale x 2 x i64> %2 |
| } |
| |
| define <vscale x 2 x i64> @vclmulh_vx(<vscale x 2 x i64> %a, iXLen %b, iXLen %vl) { |
| ; CHECK-LABEL: vclmulh_vx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma |
| ; CHECK-NEXT: vclmulh.vx v10, v8, a0 |
| ; CHECK-NEXT: vadd.vv v8, v10, v8 |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.iXLen(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen %b, iXLen -1) |
| %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl) |
| ret <vscale x 2 x i64> %2 |
| } |