| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 |
| ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 |
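; Test lowering of @llvm.experimental.vp.splat for scalable i1 vectors on
; RV32 and RV64 with the V extension, covering constant (all-true/all-false)
; unmasked splats and masked non-constant splats at every LMUL.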
| |
| define <vscale x 1 x i1> @vp_splat_nxv1i1_true_unmasked(i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv1i1_true_unmasked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmset.m v0 |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 1 x i1> @llvm.experimental.vp.splat.nxv1i1(i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl) |
| ret <vscale x 1 x i1> %splat |
| } |
| |
| define <vscale x 1 x i1> @vp_splat_nxv1i1_false_unmasked(i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv1i1_false_unmasked: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmclr.m v0 |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 1 x i1> @llvm.experimental.vp.splat.nxv1i1(i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl) |
| ret <vscale x 1 x i1> %splat |
| } |
| |
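; A non-constant i1 splat is lowered by broadcasting the scalar into an e8
; vector with vmv.v.x and then forming the mask with a masked vmsne.vi
; against zero.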
| define <vscale x 1 x i1> @vp_splat_nxv1i1(i1 %val, <vscale x 1 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv1i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma |
| ; CHECK-NEXT: vmv.v.x v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 1 x i1> @llvm.experimental.vp.splat.nxv1i1(i1 %val, <vscale x 1 x i1> %m, i32 %evl) |
| ret <vscale x 1 x i1> %splat |
| } |
| |
| define <vscale x 2 x i1> @vp_splat_nxv2i1(i1 %val, <vscale x 2 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv2i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma |
| ; CHECK-NEXT: vmv.v.x v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 2 x i1> @llvm.experimental.vp.splat.nxv2i1(i1 %val, <vscale x 2 x i1> %m, i32 %evl) |
| ret <vscale x 2 x i1> %splat |
| } |
| |
| define <vscale x 4 x i1> @vp_splat_nxv4i1(i1 %val, <vscale x 4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv4i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma |
| ; CHECK-NEXT: vmv.v.x v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 4 x i1> @llvm.experimental.vp.splat.nxv4i1(i1 %val, <vscale x 4 x i1> %m, i32 %evl) |
| ret <vscale x 4 x i1> %splat |
| } |
| |
| define <vscale x 8 x i1> @vp_splat_nxv8i1(i1 %val, <vscale x 8 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv8i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv.v.x v8, a0 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 8 x i1> @llvm.experimental.vp.splat.nxv8i1(i1 %val, <vscale x 8 x i1> %m, i32 %evl) |
| ret <vscale x 8 x i1> %splat |
| } |
| |
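; From nxv16i1 onward the e8 source occupies a register group larger than
; LMUL=1, so the compare result is produced in a scratch mask register and
; copied into v0 with vmv1r.v rather than written to v0 directly.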
| define <vscale x 16 x i1> @vp_splat_nxv16i1(i1 %val, <vscale x 16 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv16i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; CHECK-NEXT: vmv.v.x v10, a0 |
| ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 16 x i1> @llvm.experimental.vp.splat.nxv16i1(i1 %val, <vscale x 16 x i1> %m, i32 %evl) |
| ret <vscale x 16 x i1> %splat |
| } |
| |
| define <vscale x 32 x i1> @vp_splat_nxv32i1(i1 %val, <vscale x 32 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv32i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma |
| ; CHECK-NEXT: vmv.v.x v12, a0 |
| ; CHECK-NEXT: vmsne.vi v8, v12, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 32 x i1> @llvm.experimental.vp.splat.nxv32i1(i1 %val, <vscale x 32 x i1> %m, i32 %evl) |
| ret <vscale x 32 x i1> %splat |
| } |
| |
| define <vscale x 64 x i1> @vp_splat_nxv64i1(i1 %val, <vscale x 64 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: vp_splat_nxv64i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.x v16, a0 |
| ; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: ret |
| %splat = call <vscale x 64 x i1> @llvm.experimental.vp.splat.nxv64i1(i1 %val, <vscale x 64 x i1> %m, i32 %evl) |
| ret <vscale x 64 x i1> %splat |
| } |
| |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; RV32: {{.*}} |
| ; RV64: {{.*}} |