| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s |
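
; These tests check that a vmerge with an all-ones mask, whose passthru
; matches the passthru of the masked operation feeding it, is folded away:
; the expected output is just the masked instruction with a
; tail-undisturbed (tu) policy and no separate vmerge.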
| |
declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64)
| |
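; Fold the vmerge into a masked integer op (vadd/vsub below).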
| define <vscale x 2 x i32> @vpmerge_vadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vadd: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) |
| |
| define <vscale x 2 x i32> @vpmerge_vsub(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vsub: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) |
| |
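; The same fold applies to floating-point ops. The vfadd/vfsub mask
; intrinsics carry an extra rounding-mode operand (7, i.e. dynamic frm).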
| define <vscale x 2 x float> @vpmerge_vfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vfadd: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 7, i64 %vl, i64 1) |
%b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
ret <vscale x 2 x float> %b
| } |
| declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64, i64, i64) |
| |
| define <vscale x 2 x float> @vpmerge_vfsub(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vfsub: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 7, i64 %vl, i64 1) |
| %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x float> %b |
| } |
| declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64, i64, i64) |
| |
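; Widening op: the folded vwadd keeps the narrower source type in the
; vsetvli (e16, mf2).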
| define <vscale x 2 x i32> @vpmerge_vwadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vwadd: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu |
| ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(<vscale x 2 x i32> %passthru, <vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) |
| |
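; The fold also applies to masked unit-stride loads.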
| define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vle: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu |
| ; CHECK-NEXT: vle32.v v8, (a0), v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32( |
| <vscale x 2 x i32> %passthru, |
| ptr %p, |
| <vscale x 2 x i1> %m, |
| i64 %vl, i64 1) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64, i64) |
| |
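; Slide instructions can absorb the vmerge as well; note these intrinsic
; calls use a tail-undisturbed policy operand (0).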
| declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64, i64) |
| define <vscale x 2 x i32> @vpmerge_vslideup(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vslideup: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu |
| ; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| |
| declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64, i64) |
| define <vscale x 2 x i32> @vpmerge_vslidedown(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vslidedown: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu |
| ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| |
| declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i64, i64) |
| define <vscale x 2 x i32> @vpmerge_vslide1up(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vslide1up: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu |
| ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| |
| declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i64, i64) |
| define <vscale x 2 x i32> @vpmerge_vslide1down(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vslide1down: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu |
| ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |
| |
| ; Tests for folding vmerge into its ops when their VLs differ |
| |
; Can fold: the vadd's VL of 3 shrinks to the vmerge's VL of 2.
| define <vscale x 2 x i32> @vmerge_smaller_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_smaller_vl_same_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2) |
| ret <vscale x 2 x i32> %b |
| } |
| |
; Can fold: the vadd's VL of 2 is the smaller of the two, so the result uses VL=2.
| define <vscale x 2 x i32> @vmerge_larger_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_larger_vl_same_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3) |
| ret <vscale x 2 x i32> %b |
| } |
| |
; Can fold with VL=2, but a tu vmv.v.v is still needed to merge the result into %pt2.
| define <vscale x 2 x i32> @vmerge_smaller_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_smaller_vl_different_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v10, v11, v0.t |
| ; CHECK-NEXT: vmv.v.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2) |
| ret <vscale x 2 x i32> %b |
| } |
| |
| ; Can't fold this because we need to take elements from both %pt1 and %pt2 |
| define <vscale x 2 x i32> @vmerge_larger_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_larger_vl_different_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v10, v11, v0.t |
| ; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma |
| ; CHECK-NEXT: vmv.v.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v8, v9 |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3) |
| ret <vscale x 2 x i32> %b |
| } |
| |
; Can fold with VL=2; the vadd's poison passthru can be replaced by %passthru.
| define <vscale x 2 x i32> @vmerge_smaller_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_smaller_vl_poison_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2) |
| ret <vscale x 2 x i32> %b |
| } |
| |
; Can fold with VL=2; elements past the vadd's VL come from its poison passthru.
| define <vscale x 2 x i32> @vmerge_larger_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) { |
| ; CHECK-LABEL: vmerge_larger_vl_poison_passthru: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3) |
| ret <vscale x 2 x i32> %b |
| } |
| |
; Test VFCVT_RM: the floor lowers to vfcvt.x.f.v under a statically set
; round-down mode (fsrmi 2), which must be preserved across the fold.
| declare <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float>) |
| declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32) |
| define <vscale x 2 x i32> @vmerge_vfcvt_rm(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vmerge_vfcvt_rm: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: fsrmi a1, 2 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t |
| ; CHECK-NEXT: fsrm a1 |
| ; CHECK-NEXT: ret |
| entry: |
| %floor = call <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float> %a) |
| %i = fptosi <vscale x 2 x float> %floor to <vscale x 2 x i32> |
| %res = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %i, <vscale x 2 x i32> %passthru, i32 %evl) |
| ret <vscale x 2 x i32> %res |
| } |
| |
| ; Test VIOTA_M |
| declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i1>, i64, i64) |
| define <vscale x 2 x i32> @vpmerge_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) { |
| ; CHECK-LABEL: vpmerge_viota: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: viota.m v8, v9, v0.t |
| ; CHECK-NEXT: ret |
| %1 = zext i32 %vl to i64 |
| %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i1> %vm, <vscale x 2 x i1> %m, i64 %1, i64 0) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %1) |
| ret <vscale x 2 x i32> %b |
| } |
| |
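; Here the vmerge's mask is %m rather than all ones, but it matches the
; vadd's mask, so the fold is still valid.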
| define <vscale x 2 x i32> @vpmerge_vadd_same_mask(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) { |
| ; CHECK-LABEL: vpmerge_vadd_same_mask: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu |
| ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t |
| ; CHECK-NEXT: ret |
| %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1) |
| %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 %vl) |
| ret <vscale x 2 x i32> %b |
| } |