| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ |
| ; RUN: < %s | FileCheck %s |
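
; Test lowering of llvm.experimental.vp.splice on scalable mask vectors. Mask
; registers cannot be slid directly, so (as the checks below show) each mask
; is first expanded to a vector of i8 0/1 values (vmv.v.i of zero plus
; vmerge.vim), the splice is performed on the byte vectors with
; vslidedown/vslideup, and the result is converted back to a mask with
; vmsne.vi.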
| |
| declare <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32, <vscale x 1 x i1>, i32, i32) |
| declare <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32, <vscale x 2 x i1>, i32, i32) |
| declare <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32, <vscale x 4 x i1>, i32, i32) |
| declare <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32, <vscale x 8 x i1>, i32, i32) |
| declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32, <vscale x 16 x i1>, i32, i32) |
| declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32) |
| declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32) |
| |
| define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv1i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v9, v9, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vslideup.vx v9, v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 1 x i1> %v |
| } |
| |
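; With a negative offset the splice takes trailing elements of the first
; operand: the expanded first operand is slid down by evla-5 with VL=5 so its
; last 5 active elements land at the front, then the expanded second operand
; is slid up by 5 behind them.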
| define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v9, v9, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vslideup.vi v9, v8, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 1 x i1> %v |
| } |
| |
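; In the masked variants the slides and the final compare execute under %mask
; (v0.t); the vslideup uses the mask-undisturbed (mu) policy so elements where
; the mask is off keep the values already slid into the destination.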
| define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv1i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v11, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t |
| ; CHECK-NEXT: ret |

%v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 1 x i1> %v |
| } |
| |
| define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv2i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v9, v9, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslideup.vx v9, v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 2 x i1> %v |
| } |
| |
| define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v9, v9, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslideup.vi v9, v8, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 2 x i1> %v |
| } |
| |
| define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv2i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v11, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu |
| ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t |
| ; CHECK-NEXT: ret |

%v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 2 x i1> %v |
| } |
| |
| define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv4i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v9, v9, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v9, v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 4 x i1> %v |
| } |
| |
| define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v9, v9, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslideup.vi v9, v8, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 4 x i1> %v |
| } |
| |
| define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv4i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v11, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t |
| ; CHECK-NEXT: ret |

%v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 4 x i1> %v |
| } |
| |
| define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv8i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v9, v9, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vx v9, v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 8 x i1> %v |
| } |
| |
| define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v9, v9, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vslideup.vi v9, v8, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v9, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 8 x i1> %v |
| } |
| |
| define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv8i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.i v11, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v10, v11, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t |
| ; CHECK-NEXT: ret |

%v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 8 x i1> %v |
| } |
| |
| define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv16i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v8, v8, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v10, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 16 x i1> %v |
| } |
| |
| define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v10, 0 |
| ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vslideup.vi v8, v10, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 16 x i1> %v |
| } |
| |
| define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv16i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.i v14, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v10, v14, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vslideup.vx v10, v12, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |

%v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 16 x i1> %v |
| } |
| |
| define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv32i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v8, v8, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v12, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 32 x i1> %v |
| } |
| |
| define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, m4, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vslideup.vi v8, v12, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 32 x i1> %v |
| } |
| |
| define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv32i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v12, 0 |
| ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v16, v16, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu |
| ; CHECK-NEXT: vslideup.vx v16, v12, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma |
| ; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |

%v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 32 x i1> %v |
| } |
| |
| define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv64i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v24, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v24, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v8, v8, 5 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vslideup.vx v8, v16, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 64 x i1> %v |
| } |
| |
| define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v9, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v24, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vmerge.vim v8, v24, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vsetivli zero, 5, e8, m8, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v8, v8, a0 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vslideup.vi v8, v16, 5 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: ret |
| |
| %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb) |
| ret <vscale x 64 x i1> %v |
| } |
| |
| define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { |
| ; CHECK-LABEL: test_vp_splice_nxv64i1_masked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v10, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v24, 0 |
| ; CHECK-NEXT: vmv1r.v v0, v10 |
| ; CHECK-NEXT: vmerge.vim v24, v24, 1, v0 |
| ; CHECK-NEXT: addi a0, a0, -5 |
| ; CHECK-NEXT: vmv1r.v v0, v9 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v24, v24, 5, v0.t |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu |
| ; CHECK-NEXT: vslideup.vx v24, v16, a0, v0.t |
| ; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, ma |
| ; CHECK-NEXT: vmsne.vi v8, v24, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |

%v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
| ret <vscale x 64 x i1> %v |
| } |