| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s |
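
; Check that @llvm.vector.reduce.* intrinsics on scalable vector types are
; lowered to single predicated SVE horizontal reduction instructions.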
| |
| ; ANDV |
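; Bitwise AND across all lanes. The reduction result is placed in the low
; element of z0, so it is returned via an fmov from the overlapping FP/SIMD
; register (s0 for the sub-32-bit element types).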
| |
| define i8 @andv_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: andv_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: andv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.and.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @andv_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: andv_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: andv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.and.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @andv_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: andv_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: andv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @andv_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: andv_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: andv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; ORV |
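; Bitwise OR across all lanes, lowered to the SVE ORV instruction.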
| |
| define i8 @orv_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: orv_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: orv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.or.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @orv_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: orv_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: orv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @orv_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: orv_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: orv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @orv_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: orv_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: orv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; XORV |
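; Bitwise XOR across all lanes. Note that LLVM's xor reduction lowers to the
; SVE EORV instruction.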
| |
| define i8 @xorv_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: xorv_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: eorv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.xor.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @xorv_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: xorv_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: eorv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.xor.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @xorv_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: xorv_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: eorv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @xorv_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: xorv_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: eorv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; UADDV |
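; Integer add across all lanes. UADDV always accumulates into a 64-bit D
; register regardless of element size, hence the fmov from d0 and the kill
; annotation truncating x0 to w0 in the sub-64-bit cases.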
| |
| define i8 @uaddv_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: uaddv_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: uaddv d0, p0, z0.b |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @uaddv_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: uaddv_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: uaddv d0, p0, z0.h |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @uaddv_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: uaddv_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: uaddv d0, p0, z0.s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @uaddv_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: uaddv_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: uaddv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; UMINV |
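; Unsigned minimum across all lanes.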
| |
| define i8 @umin_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: umin_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: uminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umin.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @umin_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: umin_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: uminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umin.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @umin_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: umin_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: uminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @umin_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: umin_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: uminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; SMINV |
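; Signed minimum across all lanes.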
| |
| define i8 @smin_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: smin_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: sminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smin.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @smin_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: smin_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: sminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smin.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @smin_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: smin_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: sminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @smin_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: smin_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: sminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; UMAXV |
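; Unsigned maximum across all lanes.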
| |
| define i8 @umax_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: umax_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: umaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umax.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @umax_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: umax_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: umaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umax.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @umax_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: umax_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: umaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @umax_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: umax_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: umaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; SMAXV |
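; Signed maximum across all lanes.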
| |
| define i8 @smax_nxv16i8(<vscale x 16 x i8> %a) { |
| ; CHECK-LABEL: smax_nxv16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b |
| ; CHECK-NEXT: smaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smax.nxv16i8(<vscale x 16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i16 @smax_nxv8i16(<vscale x 8 x i16> %a) { |
| ; CHECK-LABEL: smax_nxv8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h |
| ; CHECK-NEXT: smaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smax.nxv8i16(<vscale x 8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i32 @smax_nxv4i32(<vscale x 4 x i32> %a) { |
| ; CHECK-LABEL: smax_nxv4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: smaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i64 @smax_nxv2i64(<vscale x 2 x i64> %a) { |
| ; CHECK-LABEL: smax_nxv2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: smaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %a) |
| ret i64 %res |
| } |
| |
| declare i8 @llvm.vector.reduce.and.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.and.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.or.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.xor.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.xor.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.umin.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.umin.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.smin.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.smin.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.umax.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.umax.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64>) |
| |
| declare i8 @llvm.vector.reduce.smax.nxv16i8(<vscale x 16 x i8>) |
| declare i16 @llvm.vector.reduce.smax.nxv8i16(<vscale x 8 x i16>) |
| declare i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32>) |
| declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>) |