| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256 |
| ; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 |
| ; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 |
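; Functions carrying only attribute #0 have no vscale_range, so their codegen
; depends on -aarch64-sve-vector-bits-min and they get separate VBITS_GE_256
; and VBITS_GE_512 blocks; the vscale_range(N,0) functions pin the minimum
; width and share a single CHECK block across all runs. The 2048-bit run
; reuses the VBITS_GE_512 prefix because, once the widest fixed-length type
; here fits in a single register, the generated code is expected to stop
; changing with the minimum vector length.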
| |
| target triple = "aarch64-unknown-linux-gnu" |
| |
| ; |
| ; UADDV |
| ; |
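; Reductions of vectors up to 128 bits stay on NEON (ADDV, or ADDP for 64-bit
; elements). Wider fixed-length vectors lower to the SVE UADDV, which always
; produces a 64-bit result in a D register; for i8/i16/i32 reductions the
; result is moved with "fmov x0, d0" and the "// kill" annotation marks the
; implicit narrowing of x0 to w0.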
| |
| ; Don't use SVE for 64-bit vectors. |
| define i8 @uaddv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addv b0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i8 @uaddv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addv b0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i8 @uaddv_v32i8(<32 x i8>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v32i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl32 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.b |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i8>, <32 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op) |
| ret i8 %res |
| } |
| |
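; With 256-bit registers a 512-bit operand is split: the two halves are loaded
; (the high half at a byte offset held in x8), added element-wise, and the
; single remaining register is reduced. The vector ADD needs no predication,
; unlike the predicated SMAX/SMIN/UMAX/UMIN used for the equivalent splits in
; the sections below.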
| define i8 @uaddv_v64i8(<64 x i8>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uaddv_v64i8: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov w8, #32 |
| ; VBITS_GE_256-NEXT: ptrue p0.b, vl32 |
| ; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8] |
| ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: add z0.b, z1.b, z0.b |
| ; VBITS_GE_256-NEXT: uaddv d0, p0, z0.b |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uaddv_v64i8: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 |
| ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uaddv d0, p0, z0.b |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <64 x i8>, <64 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @uaddv_v128i8(<128 x i8>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uaddv_v128i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl128 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.b |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i8>, <128 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @uaddv_v256i8(<256 x i8>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uaddv_v256i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl256 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.b |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <256 x i8>, <256 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.add.v256i8(<256 x i8> %op) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i16 @uaddv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addv h0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i16 @uaddv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addv h0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i16 @uaddv_v16i16(<16 x i16>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v16i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl16 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.h |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i16>, <16 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uaddv_v32i16(<32 x i16>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uaddv_v32i16: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #16 |
| ; VBITS_GE_256-NEXT: ptrue p0.h, vl16 |
| ; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] |
| ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: add z0.h, z1.h, z0.h |
| ; VBITS_GE_256-NEXT: uaddv d0, p0, z0.h |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uaddv_v32i16: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 |
| ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uaddv d0, p0, z0.h |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <32 x i16>, <32 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uaddv_v64i16(<64 x i16>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uaddv_v64i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl64 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.h |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i16>, <64 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uaddv_v128i16(<128 x i16>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uaddv_v128i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl128 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.h |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i16>, <128 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.add.v128i16(<128 x i16> %op) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i32 @uaddv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addp v0.2s, v0.2s, v0.2s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a) |
| ret i32 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i32 @uaddv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addv s0, v0.4s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i32 @uaddv_v8i32(<8 x i32>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl8 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <8 x i32>, <8 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uaddv_v16i32(<16 x i32>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uaddv_v16i32: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #8 |
| ; VBITS_GE_256-NEXT: ptrue p0.s, vl8 |
| ; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] |
| ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: add z0.s, z1.s, z0.s |
| ; VBITS_GE_256-NEXT: uaddv d0, p0, z0.s |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uaddv_v16i32: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 |
| ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uaddv d0, p0, z0.s |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <16 x i32>, <16 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uaddv_v32i32(<32 x i32>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uaddv_v32i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl32 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i32>, <32 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uaddv_v64i32(<64 x i32>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uaddv_v64i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl64 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.s |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i32>, <64 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %op) |
| ret i32 %res |
| } |
| |
; Nothing to do for single-element vectors.
| define i64 @uaddv_v1i64(<1 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v1i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %a) |
| ret i64 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i64 @uaddv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: addp d0, v0.2d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a) |
| ret i64 %res |
| } |
| |
| define i64 @uaddv_v4i64(<4 x i64>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uaddv_v4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl4 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <4 x i64>, <4 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @uaddv_v8i64(<8 x i64>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uaddv_v8i64: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #4 |
| ; VBITS_GE_256-NEXT: ptrue p0.d, vl4 |
| ; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] |
| ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: add z0.d, z1.d, z0.d |
| ; VBITS_GE_256-NEXT: uaddv d0, p0, z0.d |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uaddv_v8i64: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 |
| ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uaddv d0, p0, z0.d |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <8 x i64>, <8 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @uaddv_v16i64(<16 x i64>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uaddv_v16i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl16 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i64>, <16 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @uaddv_v32i64(<32 x i64>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uaddv_v32i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl32 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uaddv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i64>, <32 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> %op) |
| ret i64 %res |
| } |
| |
| ; |
| ; SMAXV |
| ; |
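; As with UADDV, NEON covers vectors up to 128 bits, but it has no SMAXV for
; 64-bit elements, so even v2i64 moves to SVE: the "// kill" line retags q0 as
; the z0 register it aliases before the predicated reduction. SVE SMAXV writes
; its result to a scalar of the element size (b0/h0/s0/d0), so sub-64-bit
; results are read with a plain "fmov w0, s0" and need no x0-to-w0 narrowing.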
| |
| ; Don't use SVE for 64-bit vectors. |
| define i8 @smaxv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxv b0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i8 @smaxv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxv b0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i8 @smaxv_v32i8(<32 x i8>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v32i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl32 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i8>, <32 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @smaxv_v64i8(<64 x i8>* %a) #0 { |
| ; VBITS_GE_256-LABEL: smaxv_v64i8: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov w8, #32 |
| ; VBITS_GE_256-NEXT: ptrue p0.b, vl32 |
| ; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8] |
| ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smax z0.b, p0/m, z0.b, z1.b |
| ; VBITS_GE_256-NEXT: smaxv b0, p0, z0.b |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: smaxv_v64i8: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 |
| ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: smaxv b0, p0, z0.b |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <64 x i8>, <64 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smax.v64i8(<64 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @smaxv_v128i8(<128 x i8>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: smaxv_v128i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl128 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i8>, <128 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @smaxv_v256i8(<256 x i8>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: smaxv_v256i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl256 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <256 x i8>, <256 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smax.v256i8(<256 x i8> %op) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i16 @smaxv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxv h0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i16 @smaxv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxv h0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i16 @smaxv_v16i16(<16 x i16>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v16i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl16 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i16>, <16 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @smaxv_v32i16(<32 x i16>* %a) #0 { |
| ; VBITS_GE_256-LABEL: smaxv_v32i16: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #16 |
| ; VBITS_GE_256-NEXT: ptrue p0.h, vl16 |
| ; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] |
| ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smax z0.h, p0/m, z0.h, z1.h |
| ; VBITS_GE_256-NEXT: smaxv h0, p0, z0.h |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: smaxv_v32i16: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 |
| ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: smaxv h0, p0, z0.h |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <32 x i16>, <32 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smax.v32i16(<32 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @smaxv_v64i16(<64 x i16>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: smaxv_v64i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl64 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i16>, <64 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smax.v64i16(<64 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @smaxv_v128i16(<128 x i16>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: smaxv_v128i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl128 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i16>, <128 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smax.v128i16(<128 x i16> %op) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i32 @smaxv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxp v0.2s, v0.2s, v0.2s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a) |
| ret i32 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i32 @smaxv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: smaxv s0, v0.4s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i32 @smaxv_v8i32(<8 x i32>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl8 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <8 x i32>, <8 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @smaxv_v16i32(<16 x i32>* %a) #0 { |
| ; VBITS_GE_256-LABEL: smaxv_v16i32: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #8 |
| ; VBITS_GE_256-NEXT: ptrue p0.s, vl8 |
| ; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] |
| ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smax z0.s, p0/m, z0.s, z1.s |
| ; VBITS_GE_256-NEXT: smaxv s0, p0, z0.s |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: smaxv_v16i32: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 |
| ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: smaxv s0, p0, z0.s |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <16 x i32>, <16 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @smaxv_v32i32(<32 x i32>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: smaxv_v32i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl32 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i32>, <32 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @smaxv_v64i32(<64 x i32>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: smaxv_v64i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl64 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i32>, <64 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> %op) |
| ret i32 %res |
| } |
| |
; Nothing to do for single-element vectors.
| define i64 @smaxv_v1i64(<1 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v1i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> %a) |
| ret i64 %res |
| } |
| |
; NEON has no SMAXV for 64-bit elements, so use SVE.
| define i64 @smaxv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 |
| ; CHECK-NEXT: ptrue p0.d, vl2 |
| ; CHECK-NEXT: smaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %a) |
| ret i64 %res |
| } |
| |
| define i64 @smaxv_v4i64(<4 x i64>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: smaxv_v4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl4 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <4 x i64>, <4 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @smaxv_v8i64(<8 x i64>* %a) #0 { |
| ; VBITS_GE_256-LABEL: smaxv_v8i64: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #4 |
| ; VBITS_GE_256-NEXT: ptrue p0.d, vl4 |
| ; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] |
| ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smax z0.d, p0/m, z0.d, z1.d |
| ; VBITS_GE_256-NEXT: smaxv d0, p0, z0.d |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: smaxv_v8i64: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 |
| ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: smaxv d0, p0, z0.d |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <8 x i64>, <8 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @smaxv_v16i64(<16 x i64>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: smaxv_v16i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl16 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i64>, <16 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @smaxv_v32i64(<32 x i64>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: smaxv_v32i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl32 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: smaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i64>, <32 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> %op) |
| ret i64 %res |
| } |
| |
| ; |
| ; SMINV |
| ; |
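; Same structure as the SMAXV tests, with SMIN/SMINV selecting the signed
; minimum instead.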
| |
| ; Don't use SVE for 64-bit vectors. |
| define i8 @sminv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminv b0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i8 @sminv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminv b0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i8 @sminv_v32i8(<32 x i8>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v32i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl32 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: sminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i8>, <32 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @sminv_v64i8(<64 x i8>* %a) #0 { |
| ; VBITS_GE_256-LABEL: sminv_v64i8: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov w8, #32 |
| ; VBITS_GE_256-NEXT: ptrue p0.b, vl32 |
| ; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8] |
| ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smin z0.b, p0/m, z0.b, z1.b |
| ; VBITS_GE_256-NEXT: sminv b0, p0, z0.b |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: sminv_v64i8: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 |
| ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: sminv b0, p0, z0.b |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <64 x i8>, <64 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smin.v64i8(<64 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @sminv_v128i8(<128 x i8>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: sminv_v128i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl128 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: sminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i8>, <128 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @sminv_v256i8(<256 x i8>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: sminv_v256i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl256 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: sminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <256 x i8>, <256 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.smin.v256i8(<256 x i8> %op) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i16 @sminv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminv h0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i16 @sminv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminv h0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i16 @sminv_v16i16(<16 x i16>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v16i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl16 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: sminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i16>, <16 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @sminv_v32i16(<32 x i16>* %a) #0 { |
| ; VBITS_GE_256-LABEL: sminv_v32i16: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #16 |
| ; VBITS_GE_256-NEXT: ptrue p0.h, vl16 |
| ; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] |
| ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smin z0.h, p0/m, z0.h, z1.h |
| ; VBITS_GE_256-NEXT: sminv h0, p0, z0.h |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: sminv_v32i16: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 |
| ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: sminv h0, p0, z0.h |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <32 x i16>, <32 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smin.v32i16(<32 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @sminv_v64i16(<64 x i16>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: sminv_v64i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl64 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: sminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i16>, <64 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smin.v64i16(<64 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @sminv_v128i16(<128 x i16>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: sminv_v128i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl128 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: sminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i16>, <128 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.smin.v128i16(<128 x i16> %op) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i32 @sminv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminp v0.2s, v0.2s, v0.2s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a) |
| ret i32 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i32 @sminv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: sminv s0, v0.4s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i32 @sminv_v8i32(<8 x i32>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl8 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: sminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <8 x i32>, <8 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @sminv_v16i32(<16 x i32>* %a) #0 { |
| ; VBITS_GE_256-LABEL: sminv_v16i32: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #8 |
| ; VBITS_GE_256-NEXT: ptrue p0.s, vl8 |
| ; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] |
| ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smin z0.s, p0/m, z0.s, z1.s |
| ; VBITS_GE_256-NEXT: sminv s0, p0, z0.s |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: sminv_v16i32: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 |
| ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: sminv s0, p0, z0.s |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <16 x i32>, <16 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @sminv_v32i32(<32 x i32>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: sminv_v32i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl32 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: sminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i32>, <32 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @sminv_v64i32(<64 x i32>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: sminv_v64i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl64 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: sminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i32>, <64 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> %op) |
| ret i32 %res |
| } |
| |
; Nothing to do for single-element vectors.
| define i64 @sminv_v1i64(<1 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v1i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> %a) |
| ret i64 %res |
| } |
| |
; NEON has no SMINV for 64-bit elements, so use SVE.
| define i64 @sminv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 |
| ; CHECK-NEXT: ptrue p0.d, vl2 |
| ; CHECK-NEXT: sminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %a) |
| ret i64 %res |
| } |
| |
| define i64 @sminv_v4i64(<4 x i64>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: sminv_v4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl4 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: sminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <4 x i64>, <4 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @sminv_v8i64(<8 x i64>* %a) #0 { |
| ; VBITS_GE_256-LABEL: sminv_v8i64: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #4 |
| ; VBITS_GE_256-NEXT: ptrue p0.d, vl4 |
| ; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] |
| ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: smin z0.d, p0/m, z0.d, z1.d |
| ; VBITS_GE_256-NEXT: sminv d0, p0, z0.d |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: sminv_v8i64: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 |
| ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: sminv d0, p0, z0.d |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <8 x i64>, <8 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @sminv_v16i64(<16 x i64>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: sminv_v16i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl16 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: sminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i64>, <16 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @sminv_v32i64(<32 x i64>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: sminv_v32i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl32 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: sminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i64>, <32 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> %op) |
| ret i64 %res |
| } |
| |
| ; |
| ; UMAXV |
| ; |
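; Same structure as the SMAXV tests, using the unsigned maximum (UMAX/UMAXV).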
| |
| ; Don't use SVE for 64-bit vectors. |
| define i8 @umaxv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxv b0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i8 @umaxv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxv b0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i8 @umaxv_v32i8(<32 x i8>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v32i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl32 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i8>, <32 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @umaxv_v64i8(<64 x i8>* %a) #0 { |
| ; VBITS_GE_256-LABEL: umaxv_v64i8: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov w8, #32 |
| ; VBITS_GE_256-NEXT: ptrue p0.b, vl32 |
| ; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8] |
| ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umax z0.b, p0/m, z0.b, z1.b |
| ; VBITS_GE_256-NEXT: umaxv b0, p0, z0.b |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: umaxv_v64i8: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 |
| ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: umaxv b0, p0, z0.b |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <64 x i8>, <64 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umax.v64i8(<64 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @umaxv_v128i8(<128 x i8>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: umaxv_v128i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl128 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i8>, <128 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @umaxv_v256i8(<256 x i8>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: umaxv_v256i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl256 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <256 x i8>, <256 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umax.v256i8(<256 x i8> %op) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i16 @umaxv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxv h0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i16 @umaxv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxv h0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i16 @umaxv_v16i16(<16 x i16>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v16i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl16 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i16>, <16 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @umaxv_v32i16(<32 x i16>* %a) #0 { |
| ; VBITS_GE_256-LABEL: umaxv_v32i16: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #16 |
| ; VBITS_GE_256-NEXT: ptrue p0.h, vl16 |
| ; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] |
| ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umax z0.h, p0/m, z0.h, z1.h |
| ; VBITS_GE_256-NEXT: umaxv h0, p0, z0.h |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: umaxv_v32i16: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 |
| ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: umaxv h0, p0, z0.h |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <32 x i16>, <32 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umax.v32i16(<32 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @umaxv_v64i16(<64 x i16>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: umaxv_v64i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl64 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i16>, <64 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umax.v64i16(<64 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @umaxv_v128i16(<128 x i16>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: umaxv_v128i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl128 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i16>, <128 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umax.v128i16(<128 x i16> %op) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i32 @umaxv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxp v0.2s, v0.2s, v0.2s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a) |
| ret i32 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i32 @umaxv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: umaxv s0, v0.4s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i32 @umaxv_v8i32(<8 x i32>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl8 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <8 x i32>, <8 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @umaxv_v16i32(<16 x i32>* %a) #0 { |
| ; VBITS_GE_256-LABEL: umaxv_v16i32: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #8 |
| ; VBITS_GE_256-NEXT: ptrue p0.s, vl8 |
| ; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] |
| ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umax z0.s, p0/m, z0.s, z1.s |
| ; VBITS_GE_256-NEXT: umaxv s0, p0, z0.s |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: umaxv_v16i32: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 |
| ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: umaxv s0, p0, z0.s |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <16 x i32>, <16 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @umaxv_v32i32(<32 x i32>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: umaxv_v32i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl32 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i32>, <32 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @umaxv_v64i32(<64 x i32>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: umaxv_v64i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl64 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i32>, <64 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> %op) |
| ret i32 %res |
| } |
| |
; Nothing to do for single-element vectors.
| define i64 @umaxv_v1i64(<1 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v1i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> %a) |
| ret i64 %res |
| } |
| |
; NEON has no UMAXV for 64-bit elements, so use SVE.
| define i64 @umaxv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 |
| ; CHECK-NEXT: ptrue p0.d, vl2 |
| ; CHECK-NEXT: umaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a) |
| ret i64 %res |
| } |
| |
| define i64 @umaxv_v4i64(<4 x i64>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: umaxv_v4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl4 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <4 x i64>, <4 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @umaxv_v8i64(<8 x i64>* %a) #0 { |
| ; VBITS_GE_256-LABEL: umaxv_v8i64: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #4 |
| ; VBITS_GE_256-NEXT: ptrue p0.d, vl4 |
| ; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] |
| ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umax z0.d, p0/m, z0.d, z1.d |
| ; VBITS_GE_256-NEXT: umaxv d0, p0, z0.d |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: umaxv_v8i64: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 |
| ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: umaxv d0, p0, z0.d |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <8 x i64>, <8 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @umaxv_v16i64(<16 x i64>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: umaxv_v16i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl16 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i64>, <16 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @umaxv_v32i64(<32 x i64>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: umaxv_v32i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl32 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: umaxv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i64>, <32 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> %op) |
| ret i64 %res |
| } |
| |
| ; |
| ; UMINV |
| ; |
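; Same structure as the SMAXV tests, using the unsigned minimum (UMIN/UMINV).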
| |
| ; Don't use SVE for 64-bit vectors. |
| define i8 @uminv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v8i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminv b0, v0.8b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i8 @uminv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v16i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminv b0, v0.16b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a) |
| ret i8 %res |
| } |
| |
| define i8 @uminv_v32i8(<32 x i8>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v32i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl32 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i8>, <32 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @uminv_v64i8(<64 x i8>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uminv_v64i8: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov w8, #32 |
| ; VBITS_GE_256-NEXT: ptrue p0.b, vl32 |
| ; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8] |
| ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umin z0.b, p0/m, z0.b, z1.b |
| ; VBITS_GE_256-NEXT: uminv b0, p0, z0.b |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uminv_v64i8: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 |
| ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uminv b0, p0, z0.b |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <64 x i8>, <64 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umin.v64i8(<64 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @uminv_v128i8(<128 x i8>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uminv_v128i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl128 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i8>, <128 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %op) |
| ret i8 %res |
| } |
| |
| define i8 @uminv_v256i8(<256 x i8>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uminv_v256i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.b, vl256 |
| ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] |
| ; CHECK-NEXT: uminv b0, p0, z0.b |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <256 x i8>, <256 x i8>* %a |
| %res = call i8 @llvm.vector.reduce.umin.v256i8(<256 x i8> %op) |
| ret i8 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i16 @uminv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v4i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminv h0, v0.4h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i16 @uminv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v8i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminv h0, v0.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a) |
| ret i16 %res |
| } |
| |
| define i16 @uminv_v16i16(<16 x i16>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v16i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl16 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i16>, <16 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uminv_v32i16(<32 x i16>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uminv_v32i16: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #16 |
| ; VBITS_GE_256-NEXT: ptrue p0.h, vl16 |
| ; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] |
| ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umin z0.h, p0/m, z0.h, z1.h |
| ; VBITS_GE_256-NEXT: uminv h0, p0, z0.h |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uminv_v32i16: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 |
| ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uminv h0, p0, z0.h |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <32 x i16>, <32 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umin.v32i16(<32 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uminv_v64i16(<64 x i16>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uminv_v64i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl64 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i16>, <64 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umin.v64i16(<64 x i16> %op) |
| ret i16 %res |
| } |
| |
| define i16 @uminv_v128i16(<128 x i16>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uminv_v128i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.h, vl128 |
| ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] |
| ; CHECK-NEXT: uminv h0, p0, z0.h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <128 x i16>, <128 x i16>* %a |
| %res = call i16 @llvm.vector.reduce.umin.v128i16(<128 x i16> %op) |
| ret i16 %res |
| } |
| |
| ; Don't use SVE for 64-bit vectors. |
| define i32 @uminv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v2i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminp v0.2s, v0.2s, v0.2s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a) |
| ret i32 %res |
| } |
| |
| ; Don't use SVE for 128-bit vectors. |
| define i32 @uminv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v4i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uminv s0, v0.4s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a) |
| ret i32 %res |
| } |
| |
| define i32 @uminv_v8i32(<8 x i32>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl8 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <8 x i32>, <8 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uminv_v16i32(<16 x i32>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uminv_v16i32: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #8 |
| ; VBITS_GE_256-NEXT: ptrue p0.s, vl8 |
| ; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] |
| ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umin z0.s, p0/m, z0.s, z1.s |
| ; VBITS_GE_256-NEXT: uminv s0, p0, z0.s |
| ; VBITS_GE_256-NEXT: fmov w0, s0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uminv_v16i32: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 |
| ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uminv s0, p0, z0.s |
| ; VBITS_GE_512-NEXT: fmov w0, s0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <16 x i32>, <16 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uminv_v32i32(<32 x i32>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uminv_v32i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl32 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i32>, <32 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> %op) |
| ret i32 %res |
| } |
| |
| define i32 @uminv_v64i32(<64 x i32>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uminv_v64i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s, vl64 |
| ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: uminv s0, p0, z0.s |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| %op = load <64 x i32>, <64 x i32>* %a |
| %res = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> %op) |
| ret i32 %res |
| } |
| |
; Nothing to do for single-element vectors.
| define i64 @uminv_v1i64(<1 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v1i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> %a) |
| ret i64 %res |
| } |
| |
; NEON has no UMINV for 64-bit elements, so use SVE.
| define i64 @uminv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v2i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 |
| ; CHECK-NEXT: ptrue p0.d, vl2 |
| ; CHECK-NEXT: uminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %a) |
| ret i64 %res |
| } |
| |
| define i64 @uminv_v4i64(<4 x i64>* %a) vscale_range(2,0) #0 { |
| ; CHECK-LABEL: uminv_v4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl4 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <4 x i64>, <4 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op) |
| ret i64 %res |
| } |
| |
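| ; As for v16i32: with 256-bit vectors, UMIN the two v4i64 halves before the |
| ; final UMINV. |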
| define i64 @uminv_v8i64(<8 x i64>* %a) #0 { |
| ; VBITS_GE_256-LABEL: uminv_v8i64: |
| ; VBITS_GE_256: // %bb.0: |
| ; VBITS_GE_256-NEXT: mov x8, #4 |
| ; VBITS_GE_256-NEXT: ptrue p0.d, vl4 |
| ; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] |
| ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] |
| ; VBITS_GE_256-NEXT: umin z0.d, p0/m, z0.d, z1.d |
| ; VBITS_GE_256-NEXT: uminv d0, p0, z0.d |
| ; VBITS_GE_256-NEXT: fmov x0, d0 |
| ; VBITS_GE_256-NEXT: ret |
| ; |
| ; VBITS_GE_512-LABEL: uminv_v8i64: |
| ; VBITS_GE_512: // %bb.0: |
| ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 |
| ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; VBITS_GE_512-NEXT: uminv d0, p0, z0.d |
| ; VBITS_GE_512-NEXT: fmov x0, d0 |
| ; VBITS_GE_512-NEXT: ret |
| %op = load <8 x i64>, <8 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @uminv_v16i64(<16 x i64>* %a) vscale_range(8,0) #0 { |
| ; CHECK-LABEL: uminv_v16i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl16 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <16 x i64>, <16 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %op) |
| ret i64 %res |
| } |
| |
| define i64 @uminv_v32i64(<32 x i64>* %a) vscale_range(16,0) #0 { |
| ; CHECK-LABEL: uminv_v32i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d, vl32 |
| ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: uminv d0, p0, z0.d |
| ; CHECK-NEXT: fmov x0, d0 |
| ; CHECK-NEXT: ret |
| %op = load <32 x i64>, <32 x i64>* %a |
| %res = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> %op) |
| ret i64 %res |
| } |
| |
| attributes #0 = { "target-features"="+sve" } |
| |
| declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>) |
| declare i8 @llvm.vector.reduce.add.v256i8(<256 x i8>) |
| |
| declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>) |
| declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>) |
| |
| declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>) |
| declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>) |
| |
| declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>) |
| declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>) |
| declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>) |
| |
| declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>) |
| declare i8 @llvm.vector.reduce.smax.v256i8(<256 x i8>) |
| |
| declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>) |
| declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>) |
| |
| declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>) |
| declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>) |
| |
| declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>) |
| declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>) |
| declare i64 @llvm.vector.reduce.smax.v32i64(<32 x i64>) |
| |
| declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>) |
| declare i8 @llvm.vector.reduce.smin.v256i8(<256 x i8>) |
| |
| declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>) |
| declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>) |
| |
| declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>) |
| declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>) |
| |
| declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>) |
| declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>) |
| declare i64 @llvm.vector.reduce.smin.v32i64(<32 x i64>) |
| |
| declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>) |
| declare i8 @llvm.vector.reduce.umax.v256i8(<256 x i8>) |
| |
| declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>) |
| declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>) |
| |
| declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>) |
| declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>) |
| |
| declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>) |
| declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>) |
| declare i64 @llvm.vector.reduce.umax.v32i64(<32 x i64>) |
| |
| declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>) |
| declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>) |
| declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>) |
| declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>) |
| declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>) |
| declare i8 @llvm.vector.reduce.umin.v256i8(<256 x i8>) |
| |
| declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>) |
| declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>) |
| declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>) |
| declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>) |
| declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>) |
| declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>) |
| |
| declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>) |
| declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>) |
| declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>) |
| declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>) |
| declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>) |
| |
| declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>) |
| declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>) |
| declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) |
| declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>) |
| declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>) |
| declare i64 @llvm.vector.reduce.umin.v32i64(<32 x i64>) |