; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64

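; This file tests codegen for the @llvm.vp.reduce.* intrinsics on fixed-length
; vectors. Each reduction moves the scalar start value into element 0 of a
; scratch register (vmv.s.x, or a stack reload on RV32 for i64) and issues a
; masked vred*.vs whose VL is taken from the %evl operand.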
declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.add.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

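; The unsigned reductions must zero-extend the i8 start value (the ABI passes
; it sign-extended), hence the extra andi with 255 before the insert.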
declare i8 @llvm.vp.reduce.umax.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.umax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.smax.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.smax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.umin.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.umin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.smin.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.smin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.and.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.and.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.or.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.or.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.xor.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.xor.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

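; <3 x i8> is not a legal power-of-two type; it is widened to <4 x i8>, so
; this reduction runs at mf4 like the v4i8 cases that follow.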
declare i8 @llvm.vp.reduce.umin.v3i8(i8, <3 x i8>, <3 x i1>, i32)

define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v3i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.umin.v3i8(i8 %s, <3 x i8> %v, <3 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.add.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.umax.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.umax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.smax.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.smax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.umin.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.umin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.smin.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.smin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.and.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.and.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.or.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.or.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.xor.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i8 @llvm.vp.reduce.xor.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i16 @llvm.vp.reduce.add.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.add.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

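; Without Zbb's zext.h there is no single i16 zero-extension instruction, so
; the unsigned i16 start value is cleared with a shift pair (slli/srli by 16
; on RV32, by 48 on RV64), which is why these cases need per-target checks.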
declare i16 @llvm.vp.reduce.umax.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umax_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i16 @llvm.vp.reduce.umax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.smax.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.smax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.umin.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umin_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i16 @llvm.vp.reduce.umin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.smin.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.smin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.and.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.and.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.or.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.or.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.xor.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.xor.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.add.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.add.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.umax.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i16:
; RV32: # %bb.0:
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umax_v4i16:
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i16 @llvm.vp.reduce.umax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.smax.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.smax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.umin.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i16:
; RV32: # %bb.0:
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: srli a0, a0, 16
; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umin_v4i16:
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: srli a0, a0, 48
; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i16 @llvm.vp.reduce.umin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.smin.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.smin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.and.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.and.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.or.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.or.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i16 @llvm.vp.reduce.xor.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.xor.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}

declare i32 @llvm.vp.reduce.add.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.add.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.umax.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.umax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.smax.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.smax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.umin.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.umin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.smin.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.smin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.and.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.and.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.or.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.or.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.xor.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.xor.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
ret i32 %r
}

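; With the 128-bit minimum VLEN implied by +v, <4 x i32> occupies a full
; vector register, so these reductions run at LMUL=m1 rather than at a
; fractional LMUL.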
declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.add.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.umax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.smax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.umin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.smin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.and.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.or.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT: vmv.x.s a0, v9
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.xor.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
ret i32 %r
}

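; <64 x i32> does not fit in a single register group at SEW=32, so the
; reduction is split into two 32-element m8 halves: the second half's mask
; bits are slid down by 4 mask bytes, the first half's VL is min(%evl, 32)
; via a compare-and-branch, and the second half's VL is a branchless
; saturating %evl-32 (the sltu/addi/and sequence), with v25 carrying the
; partial result between the two vredxor.vs instructions.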
declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32)

define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v64i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB49_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: .LBB49_2:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vredxor.vs v25, v8, v25, v0.t
; CHECK-NEXT: addi a0, a1, -32
; CHECK-NEXT: sltu a1, a1, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vredxor.vs v25, v16, v25, v0.t
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.xor.v64i32(i32 %s, <64 x i32> %v, <64 x i1> %m, i32 %evl)
ret i32 %r
}

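; For i64 on RV32 the start value arrives as an a0/a1 register pair, so it is
; stored to the stack and loaded into element 0 with a zero-stride vlse64.v;
; the i64 result is returned as a pair too, its high half recovered with a
; vsrl.vx by 32. RV64 can use vmv.s.x and vmv.x.s directly.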
declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_add_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_add_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.add.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.umax.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umax_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.umax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.smax.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smax_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_smax_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.smax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.umin.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umin_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.umin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.smin.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smin_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_smin_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.smin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.and.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_and_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredand.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_and_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredand.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.and.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.or.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_or_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredor.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_or_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredor.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.or.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.xor.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_xor_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v9, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_xor_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.xor.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
ret i64 %r
}

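; <4 x i64> needs a two-register group, so these reductions run at m2 and the
; scalar accumulator lives in v10, out of the way of the v8m2 source operand.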
declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_add_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_add_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.add.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.umax.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umax_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.umax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.smax.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smax_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_smax_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.smax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.umin.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_umin_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.umin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.smin.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smin_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_smin_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.smin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.and.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_and_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredand.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_and_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredand.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.and.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.or.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_or_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredor.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_or_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredor.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.or.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

declare i64 @llvm.vp.reduce.xor.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_xor_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v10, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_xor_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: ret
%r = call i64 @llvm.vp.reduce.xor.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
ret i64 %r
}

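; There is no vector reduction-multiply instruction, so vp.reduce.mul is
; expanded: inactive lanes are replaced with the identity 1 via vmerge, the
; vector is reduced by repeated slide + vmul.vv halving steps, and the final
; multiply with the start value becomes a libcall (__mulsi3/__muldi3),
; presumably because these RUN lines do not enable the M extension.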
declare i8 @llvm.vp.reduce.mul.v1i8(i8, <1 x i8>, <1 x i1>, i32)

define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_mul_v1i8:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vmv.s.x v9, a1
; RV32-NEXT: vmsne.vi v9, v9, 0
; RV32-NEXT: vmand.mm v0, v9, v0
; RV32-NEXT: vmv.v.i v9, 1
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
; RV32-NEXT: call __mulsi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_mul_v1i8:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT: vmv.s.x v9, a1
; RV64-NEXT: vmsne.vi v9, v9, 0
; RV64-NEXT: vmand.mm v0, v9, v0
; RV64-NEXT: vmv.v.i v9, 1
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
; RV64-NEXT: call __muldi3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%r = call i8 @llvm.vp.reduce.mul.v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 %evl)
ret i8 %r
}

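; The multi-element mul cases materialize the EVL as a lane mask by comparing
; a vid.v index vector against %evl with vmsltu.vx and ANDing with %m (the
; v1i8 case above only needed a vmsne.vi against zero).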
declare i8 @llvm.vp.reduce.mul.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_mul_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vid.v v9
; RV32-NEXT: vmsltu.vx v9, v9, a1
; RV32-NEXT: vmand.mm v0, v9, v0
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT: vmv.v.i v9, 1
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vrgather.vi v9, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_mul_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vid.v v9
; RV64-NEXT: vmsltu.vx v9, v9, a1
; RV64-NEXT: vmand.mm v0, v9, v0
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT: vmv.v.i v9, 1
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vrgather.vi v9, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%r = call i8 @llvm.vp.reduce.mul.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.mul.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_mul_v4i8:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vid.v v9
; RV32-NEXT: vmsltu.vx v9, v9, a1
; RV32-NEXT: vmand.mm v0, v9, v0
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v9, 1
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vrgather.vi v9, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_mul_v4i8:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vid.v v9
; RV64-NEXT: vmsltu.vx v9, v9, a1
; RV64-NEXT: vmand.mm v0, v9, v0
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v9, 1
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vrgather.vi v9, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%r = call i8 @llvm.vp.reduce.mul.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
ret i8 %r
}

declare i8 @llvm.vp.reduce.mul.v8i8(i8, <8 x i8>, <8 x i1>, i32)

define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_mul_v8i8:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vid.v v10
; RV32-NEXT: vmsltu.vx v9, v10, a1
; RV32-NEXT: vmand.mm v0, v9, v0
; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT: vmv.v.i v9, 1
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vslidedown.vi v9, v8, 4
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vrgather.vi v9, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vpreduce_mul_v8i8:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vid.v v10
; RV64-NEXT: vmsltu.vx v9, v10, a1
; RV64-NEXT: vmand.mm v0, v9, v0
; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT: vmv.v.i v9, 1
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vslidedown.vi v9, v8, 4
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vrgather.vi v9, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%r = call i8 @llvm.vp.reduce.mul.v8i8(i8 %s, <8 x i8> %v, <8 x i1> %m, i32 %evl)
ret i8 %r
}
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| %r = call i8 @llvm.vp.reduce.mul.v8i8(i8 %s, <8 x i8> %v, <8 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |
| |
| declare i8 @llvm.vp.reduce.mul.v16i8(i8, <16 x i8>, <16 x i1>, i32) |
| |
| define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m, i32 zeroext %evl) { |
| ; RV32-LABEL: vpreduce_mul_v16i8: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma |
| ; RV32-NEXT: vid.v v12 |
| ; RV32-NEXT: vmsltu.vx v9, v12, a1 |
| ; RV32-NEXT: vmand.mm v0, v9, v0 |
| ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RV32-NEXT: vmv.v.i v9, 1 |
| ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 |
| ; RV32-NEXT: vslidedown.vi v9, v8, 8 |
| ; RV32-NEXT: vmul.vv v8, v8, v9 |
| ; RV32-NEXT: vslidedown.vi v9, v8, 4 |
| ; RV32-NEXT: vmul.vv v8, v8, v9 |
| ; RV32-NEXT: vslidedown.vi v9, v8, 2 |
| ; RV32-NEXT: vmul.vv v8, v8, v9 |
| ; RV32-NEXT: vrgather.vi v9, v8, 1 |
| ; RV32-NEXT: vmul.vv v8, v8, v9 |
| ; RV32-NEXT: vmv.x.s a0, v8 |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: call __mulsi3 |
| ; RV32-NEXT: slli a0, a0, 24 |
| ; RV32-NEXT: srai a0, a0, 24 |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vpreduce_mul_v16i8: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma |
| ; RV64-NEXT: vid.v v12 |
| ; RV64-NEXT: vmsltu.vx v9, v12, a1 |
| ; RV64-NEXT: vmand.mm v0, v9, v0 |
| ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RV64-NEXT: vmv.v.i v9, 1 |
| ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 |
| ; RV64-NEXT: vslidedown.vi v9, v8, 8 |
| ; RV64-NEXT: vmul.vv v8, v8, v9 |
| ; RV64-NEXT: vslidedown.vi v9, v8, 4 |
| ; RV64-NEXT: vmul.vv v8, v8, v9 |
| ; RV64-NEXT: vslidedown.vi v9, v8, 2 |
| ; RV64-NEXT: vmul.vv v8, v8, v9 |
| ; RV64-NEXT: vrgather.vi v9, v8, 1 |
| ; RV64-NEXT: vmul.vv v8, v8, v9 |
| ; RV64-NEXT: vmv.x.s a0, v8 |
| ; RV64-NEXT: mv a1, a2 |
| ; RV64-NEXT: call __muldi3 |
| ; RV64-NEXT: slli a0, a0, 56 |
| ; RV64-NEXT: srai a0, a0, 56 |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| %r = call i8 @llvm.vp.reduce.mul.v16i8(i8 %s, <16 x i8> %v, <16 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |
| |
| declare i8 @llvm.vp.reduce.mul.v32i8(i8, <32 x i8>, <32 x i1>, i32) |
| |
| define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { |
| ; RV32-LABEL: vpreduce_mul_v32i8: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: li a0, 32 |
| ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; RV32-NEXT: vid.v v16 |
| ; RV32-NEXT: vmsltu.vx v10, v16, a1 |
| ; RV32-NEXT: vmand.mm v0, v10, v0 |
| ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RV32-NEXT: vmv.v.i v10, 1 |
| ; RV32-NEXT: vmerge.vvm v8, v10, v8, v0 |
| ; RV32-NEXT: vslidedown.vi v10, v8, 16 |
| ; RV32-NEXT: vmul.vv v8, v8, v10 |
| ; RV32-NEXT: vslidedown.vi v10, v8, 8 |
| ; RV32-NEXT: vmul.vv v8, v8, v10 |
| ; RV32-NEXT: vslidedown.vi v10, v8, 4 |
| ; RV32-NEXT: vmul.vv v8, v8, v10 |
| ; RV32-NEXT: vslidedown.vi v10, v8, 2 |
| ; RV32-NEXT: vmul.vv v8, v8, v10 |
| ; RV32-NEXT: vrgather.vi v10, v8, 1 |
| ; RV32-NEXT: vmul.vv v8, v8, v10 |
| ; RV32-NEXT: vmv.x.s a0, v8 |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: call __mulsi3 |
| ; RV32-NEXT: slli a0, a0, 24 |
| ; RV32-NEXT: srai a0, a0, 24 |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vpreduce_mul_v32i8: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a0, 32 |
| ; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; RV64-NEXT: vid.v v16 |
| ; RV64-NEXT: vmsltu.vx v10, v16, a1 |
| ; RV64-NEXT: vmand.mm v0, v10, v0 |
| ; RV64-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RV64-NEXT: vmv.v.i v10, 1 |
| ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 |
| ; RV64-NEXT: vslidedown.vi v10, v8, 16 |
| ; RV64-NEXT: vmul.vv v8, v8, v10 |
| ; RV64-NEXT: vslidedown.vi v10, v8, 8 |
| ; RV64-NEXT: vmul.vv v8, v8, v10 |
| ; RV64-NEXT: vslidedown.vi v10, v8, 4 |
| ; RV64-NEXT: vmul.vv v8, v8, v10 |
| ; RV64-NEXT: vslidedown.vi v10, v8, 2 |
| ; RV64-NEXT: vmul.vv v8, v8, v10 |
| ; RV64-NEXT: vrgather.vi v10, v8, 1 |
| ; RV64-NEXT: vmul.vv v8, v8, v10 |
| ; RV64-NEXT: vmv.x.s a0, v8 |
| ; RV64-NEXT: mv a1, a2 |
| ; RV64-NEXT: call __muldi3 |
| ; RV64-NEXT: slli a0, a0, 56 |
| ; RV64-NEXT: srai a0, a0, 56 |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| %r = call i8 @llvm.vp.reduce.mul.v32i8(i8 %s, <32 x i8> %v, <32 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |
| |
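| ; Note: for v64i8 the EVL mask needs 64 compare results, but vid.v at |
| ; e32/m8 only yields 32 indices, so the expected code loads the upper 32 |
| ; indices from a constant pool (.LCPI72_0), sign-extends them with |
| ; vsext.vf4, compares them against %evl, and joins the two mask halves |
| ; with vslideup.vi before the merge and reduction. |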
| declare i8 @llvm.vp.reduce.mul.v64i8(i8, <64 x i8>, <64 x i1>, i32) |
| |
| define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m, i32 zeroext %evl) { |
| ; RV32-LABEL: vpreduce_mul_v64i8: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: li a3, 32 |
| ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma |
| ; RV32-NEXT: lui a2, %hi(.LCPI72_0) |
| ; RV32-NEXT: addi a2, a2, %lo(.LCPI72_0) |
| ; RV32-NEXT: vle8.v v12, (a2) |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: vid.v v16 |
| ; RV32-NEXT: vmsltu.vx v14, v16, a1 |
| ; RV32-NEXT: vsext.vf4 v16, v12 |
| ; RV32-NEXT: vmsltu.vx v12, v16, a1 |
| ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma |
| ; RV32-NEXT: vslideup.vi v14, v12, 4 |
| ; RV32-NEXT: li a0, 64 |
| ; RV32-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; RV32-NEXT: vmand.mm v0, v14, v0 |
| ; RV32-NEXT: vmv.v.i v12, 1 |
| ; RV32-NEXT: vmerge.vvm v8, v12, v8, v0 |
| ; RV32-NEXT: vslidedown.vx v12, v8, a3 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vslidedown.vi v12, v8, 16 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vslidedown.vi v12, v8, 8 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vslidedown.vi v12, v8, 4 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vslidedown.vi v12, v8, 2 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vrgather.vi v12, v8, 1 |
| ; RV32-NEXT: vmul.vv v8, v8, v12 |
| ; RV32-NEXT: vmv.x.s a0, v8 |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: call __mulsi3 |
| ; RV32-NEXT: slli a0, a0, 24 |
| ; RV32-NEXT: srai a0, a0, 24 |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vpreduce_mul_v64i8: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: li a3, 32 |
| ; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma |
| ; RV64-NEXT: lui a2, %hi(.LCPI72_0) |
| ; RV64-NEXT: addi a2, a2, %lo(.LCPI72_0) |
| ; RV64-NEXT: vle8.v v12, (a2) |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: vid.v v16 |
| ; RV64-NEXT: vmsltu.vx v14, v16, a1 |
| ; RV64-NEXT: vsext.vf4 v16, v12 |
| ; RV64-NEXT: vmsltu.vx v12, v16, a1 |
| ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma |
| ; RV64-NEXT: vslideup.vi v14, v12, 4 |
| ; RV64-NEXT: li a0, 64 |
| ; RV64-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; RV64-NEXT: vmand.mm v0, v14, v0 |
| ; RV64-NEXT: vmv.v.i v12, 1 |
| ; RV64-NEXT: vmerge.vvm v8, v12, v8, v0 |
| ; RV64-NEXT: vslidedown.vx v12, v8, a3 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vslidedown.vi v12, v8, 16 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vslidedown.vi v12, v8, 8 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vslidedown.vi v12, v8, 4 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vslidedown.vi v12, v8, 2 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vrgather.vi v12, v8, 1 |
| ; RV64-NEXT: vmul.vv v8, v8, v12 |
| ; RV64-NEXT: vmv.x.s a0, v8 |
| ; RV64-NEXT: mv a1, a2 |
| ; RV64-NEXT: call __muldi3 |
| ; RV64-NEXT: slli a0, a0, 56 |
| ; RV64-NEXT: srai a0, a0, 56 |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| %r = call i8 @llvm.vp.reduce.mul.v64i8(i8 %s, <64 x i8> %v, <64 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |
| |
| ; Test when the start value is the first element of a vector. |
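| ; The extract is expected to fold away: v8 is used directly as both the |
| ; source vector and the scalar start operand of vredand.vs. |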
| define zeroext i8 @front_ele_v4i8(<4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: front_ele_v4i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma |
| ; CHECK-NEXT: vredand.vs v8, v8, v8, v0.t |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: andi a0, a0, 255 |
| ; CHECK-NEXT: ret |
| %s = extractelement <4 x i8> %v, i64 0 |
| %r = call i8 @llvm.vp.reduce.and.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |
| |
| ; Test when the start value is the first element of a vector longer than M1. |
| declare i8 @llvm.vp.reduce.and.v32i8(i8, <32 x i8>, <32 x i1>, i32) |
| define zeroext i8 @front_ele_v32i8(<32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { |
| ; CHECK-LABEL: front_ele_v32i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vredand.vs v8, v8, v8, v0.t |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: andi a0, a0, 255 |
| ; CHECK-NEXT: ret |
| %s = extractelement <32 x i8> %v, i64 0 |
| %r = call i8 @llvm.vp.reduce.and.v32i8(i8 %s, <32 x i8> %v, <32 x i1> %m, i32 %evl) |
| ret i8 %r |
| } |