; blob: d9180a28bd40b8bf80ac93f9ff8b85c5959d416f (VCS artifact; kept as an IR comment so the file parses)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=generic | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -aarch64-neon-syntax=generic | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)
declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)
declare i16 @llvm.vector.reduce.add.v3i16(<3 x i16>)
declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32>)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.add.v3i64(<3 x i64>)
declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
declare i128 @llvm.vector.reduce.add.v2i128(<2 x i128>)
; i8 reduction of a loaded <16 x i8>: both selectors lower to a single ADDV.16b.
define i8 @add_B(ptr %arr) {
; CHECK-LABEL: add_B:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: addv b0, v0.16b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
  %bin.rdx = load <16 x i8>, ptr %arr
  %r = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %bin.rdx)
  ret i8 %r
}
; i16 reduction of a loaded <8 x i16>: lowers to a single ADDV.8h.
define i16 @add_H(ptr %arr) {
; CHECK-LABEL: add_H:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: addv h0, v0.8h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
  %bin.rdx = load <8 x i16>, ptr %arr
  %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %bin.rdx)
  ret i16 %r
}
; i32 reduction of a loaded <4 x i32>: lowers to a single ADDV.4s.
define i32 @add_S( ptr %arr) {
; CHECK-LABEL: add_S:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
  %bin.rdx = load <4 x i32>, ptr %arr
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
  ret i32 %r
}
; i64 reduction of a loaded <2 x i64>: there is no ADDV.2d, so this uses ADDP d0.
define i64 @add_D(ptr %arr) {
; CHECK-LABEL: add_D:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: addp d0, v0.2d
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
  %bin.rdx = load <2 x i64>, ptr %arr
  %r = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
  ret i64 %r
}
; Absolute-difference reduction over <8 x i32> built from two zero-extended
; <8 x i8> loads. SDAG recognizes the abs(sub) pattern and folds the whole
; thing to UABDL + UADDLV; GlobalISel currently emits the unfolded
; subtract/compare/select expansion before the final ADDV.4s.
define i32 @oversized_ADDV_256(ptr noalias nocapture readonly %arg1, ptr noalias nocapture readonly %arg2) {
; CHECK-SD-LABEL: oversized_ADDV_256:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: ldr d0, [x0]
; CHECK-SD-NEXT: ldr d1, [x1]
; CHECK-SD-NEXT: uabdl v0.8h, v0.8b, v1.8b
; CHECK-SD-NEXT: uaddlv s0, v0.8h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: oversized_ADDV_256:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d1, [x0]
; CHECK-GI-NEXT: ldr d2, [x1]
; CHECK-GI-NEXT: movi v0.2d, #0000000000000000
; CHECK-GI-NEXT: usubl v1.8h, v1.8b, v2.8b
; CHECK-GI-NEXT: sshll v2.4s, v1.4h, #0
; CHECK-GI-NEXT: sshll2 v3.4s, v1.8h, #0
; CHECK-GI-NEXT: ssubw2 v0.4s, v0.4s, v1.8h
; CHECK-GI-NEXT: cmlt v4.4s, v2.4s, #0
; CHECK-GI-NEXT: cmlt v5.4s, v3.4s, #0
; CHECK-GI-NEXT: neg v6.4s, v2.4s
; CHECK-GI-NEXT: mov v1.16b, v4.16b
; CHECK-GI-NEXT: bif v0.16b, v3.16b, v5.16b
; CHECK-GI-NEXT: bsl v1.16b, v6.16b, v2.16b
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
entry:
  %0 = load <8 x i8>, ptr %arg1, align 1
  %1 = zext <8 x i8> %0 to <8 x i32>
  %2 = load <8 x i8>, ptr %arg2, align 1
  %3 = zext <8 x i8> %2 to <8 x i32>
  %4 = sub nsw <8 x i32> %1, %3
  %5 = icmp slt <8 x i32> %4, zeroinitializer
  %6 = sub nsw <8 x i32> zeroinitializer, %4
  %7 = select <8 x i1> %5, <8 x i32> %6, <8 x i32> %4
  %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %7)
  ret i32 %r
}
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
; v16i32 (512-bit) reduction: split into four q-register loads, tree of
; vector adds, then one ADDV.4s. SDAG and GISel differ only in load/add order.
define i32 @oversized_ADDV_512(ptr %arr) {
; CHECK-SD-LABEL: oversized_ADDV_512:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldp q0, q1, [x0, #32]
; CHECK-SD-NEXT: ldp q2, q3, [x0]
; CHECK-SD-NEXT: add v1.4s, v3.4s, v1.4s
; CHECK-SD-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-SD-NEXT: addv s0, v0.4s
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: oversized_ADDV_512:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldp q0, q1, [x0]
; CHECK-GI-NEXT: ldp q2, q3, [x0, #32]
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %bin.rdx = load <16 x i32>, ptr %arr
  %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
  ret i32 %r
}
; Two i8 reductions whose results are added: SDAG combines them into one
; vector add + single ADDV; GISel keeps two ADDVs and a scalar add.
define i8 @addv_combine_i8(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-SD-LABEL: addv_combine_i8:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.8b, v0.8b, v1.8b
; CHECK-SD-NEXT: addv b0, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_combine_i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv b0, v0.8b
; CHECK-GI-NEXT: addv b1, v1.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w9, w8, uxtb
; CHECK-GI-NEXT: ret
entry:
  %rdx.1 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a1)
  %rdx.2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a2)
  %r = add i8 %rdx.1, %rdx.2
  ret i8 %r
}
; Same reduce+reduce+add combine test as addv_combine_i8, for i16 elements.
define i16 @addv_combine_i16(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-SD-LABEL: addv_combine_i16:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
; CHECK-SD-NEXT: addv h0, v0.4h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_combine_i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv h0, v0.4h
; CHECK-GI-NEXT: addv h1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w9, w8, uxth
; CHECK-GI-NEXT: ret
entry:
  %rdx.1 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a1)
  %rdx.2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a2)
  %r = add i16 %rdx.1, %rdx.2
  ret i16 %r
}
; Same reduce+reduce+add combine test, for i32 elements.
define i32 @addv_combine_i32(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-SD-LABEL: addv_combine_i32:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-SD-NEXT: addv s0, v0.4s
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_combine_i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
; CHECK-GI-NEXT: ret
entry:
  %rdx.1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a1)
  %rdx.2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a2)
  %r = add i32 %rdx.1, %rdx.2
  ret i32 %r
}
; Same reduce+reduce+add combine test, for i64 elements (ADDP instead of ADDV).
define i64 @addv_combine_i64(<2 x i64> %a1, <2 x i64> %a2) {
; CHECK-SD-LABEL: addv_combine_i64:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: addp d0, v0.2d
; CHECK-SD-NEXT: fmov x0, d0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_combine_i64:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addp d0, v0.2d
; CHECK-GI-NEXT: addp d1, v1.2d
; CHECK-GI-NEXT: fmov x8, d0
; CHECK-GI-NEXT: fmov x9, d1
; CHECK-GI-NEXT: add x0, x8, x9
; CHECK-GI-NEXT: ret
entry:
  %rdx.1 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a1)
  %rdx.2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a2)
  %r = add i64 %rdx.1, %rdx.2
  ret i64 %r
}
; <2 x i8> is promoted to 32-bit lanes, so the reduction is a single ADDP.2s.
define i8 @addv_v2i8(<2 x i8> %a) {
; CHECK-LABEL: addv_v2i8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addp v0.2s, v0.2s, v0.2s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %a)
  ret i8 %arg1
}
; Odd-width <3 x i8> (passed in w0-w2): lanes are packed into a <4 x i16> with
; a zero fourth lane, then reduced with ADDV.4h. SDAG starts from a zeroed
; vector; GISel inserts an explicit zero into lane 3.
define i8 @addv_v3i8(<3 x i8> %a) {
; CHECK-SD-LABEL: addv_v3i8:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
; CHECK-SD-NEXT: mov v0.h[0], w0
; CHECK-SD-NEXT: mov v0.h[1], w1
; CHECK-SD-NEXT: mov v0.h[2], w2
; CHECK-SD-NEXT: addv h0, v0.4h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_v3i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: fmov s0, w0
; CHECK-GI-NEXT: mov w8, #0 // =0x0
; CHECK-GI-NEXT: mov v0.h[1], w1
; CHECK-GI-NEXT: mov v0.h[2], w2
; CHECK-GI-NEXT: mov v0.h[3], w8
; CHECK-GI-NEXT: addv h0, v0.4h
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v3i8(<3 x i8> %a)
  ret i8 %arg1
}
; <4 x i8> promoted to <4 x i16>: single ADDV.4h.
define i8 @addv_v4i8(<4 x i8> %a) {
; CHECK-LABEL: addv_v4i8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv h0, v0.4h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %a)
  ret i8 %arg1
}
; <8 x i8>: single ADDV.8b.
define i8 @addv_v8i8(<8 x i8> %a) {
; CHECK-LABEL: addv_v8i8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv b0, v0.8b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a)
  ret i8 %arg1
}
; <16 x i8>: single ADDV.16b.
define i8 @addv_v16i8(<16 x i8> %a) {
; CHECK-LABEL: addv_v16i8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv b0, v0.16b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a)
  ret i8 %arg1
}
; <32 x i8> spans two q registers: one vector add, then ADDV.16b.
define i8 @addv_v32i8(<32 x i8> %a) {
; CHECK-LABEL: addv_v32i8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add v0.16b, v0.16b, v1.16b
; CHECK-NEXT: addv b0, v0.16b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %a)
  ret i8 %arg1
}
; <2 x i16> promoted to 32-bit lanes: single ADDP.2s.
define i16 @addv_v2i16(<2 x i16> %a) {
; CHECK-LABEL: addv_v2i16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addp v0.2s, v0.2s, v0.2s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %a)
  ret i16 %arg1
}
; <3 x i16>: lane 3 is zeroed (SDAG via wzr, GISel via a materialized zero)
; and the padded <4 x i16> is reduced with ADDV.4h.
define i16 @addv_v3i16(<3 x i16> %a) {
; CHECK-SD-LABEL: addv_v3i16:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: mov v0.h[3], wzr
; CHECK-SD-NEXT: addv h0, v0.4h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_v3i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-GI-NEXT: mov w8, #0 // =0x0
; CHECK-GI-NEXT: mov v0.h[3], w8
; CHECK-GI-NEXT: addv h0, v0.4h
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
entry:
  %arg1 = call i16 @llvm.vector.reduce.add.v3i16(<3 x i16> %a)
  ret i16 %arg1
}
; <4 x i16>: single ADDV.4h.
define i16 @addv_v4i16(<4 x i16> %a) {
; CHECK-LABEL: addv_v4i16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv h0, v0.4h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a)
  ret i16 %arg1
}
; <8 x i16>: single ADDV.8h.
define i16 @addv_v8i16(<8 x i16> %a) {
; CHECK-LABEL: addv_v8i16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv h0, v0.8h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a)
  ret i16 %arg1
}
; <16 x i16> spans two q registers: one vector add, then ADDV.8h.
define i16 @addv_v16i16(<16 x i16> %a) {
; CHECK-LABEL: addv_v16i16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add v0.8h, v0.8h, v1.8h
; CHECK-NEXT: addv h0, v0.8h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %a)
  ret i16 %arg1
}
; <2 x i32>: pairwise ADDP.2s (no two-element ADDV form).
define i32 @addv_v2i32(<2 x i32> %a) {
; CHECK-LABEL: addv_v2i32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addp v0.2s, v0.2s, v0.2s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a)
  ret i32 %arg1
}
; <3 x i32>: lane 3 zeroed with wzr, then a full ADDV.4s. Same output for
; both selectors, so a combined CHECK prefix is used.
define i32 @addv_v3i32(<3 x i32> %a) {
; CHECK-LABEL: addv_v3i32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov v0.s[3], wzr
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %a)
  ret i32 %arg1
}
; <4 x i32>: single ADDV.4s.
define i32 @addv_v4i32(<4 x i32> %a) {
; CHECK-LABEL: addv_v4i32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
  ret i32 %arg1
}
; <8 x i32> spans two q registers: one vector add, then ADDV.4s.
define i32 @addv_v8i32(<8 x i32> %a) {
; CHECK-LABEL: addv_v8i32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
  %arg1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a)
  ret i32 %arg1
}
; <2 x i64>: pairwise ADDP d0 (no 64-bit-element ADDV).
define i64 @addv_v2i64(<2 x i64> %a) {
; CHECK-LABEL: addv_v2i64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: addp d0, v0.2d
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
entry:
  %arg1 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a)
  ret i64 %arg1
}
; <3 x i64> arrives in three d registers: elements 0-1 are packed into one
; q register, element 2 is padded with a zero lane, then add + ADDP. SDAG and
; GISel emit the same sequence modulo the order of the register-kill notes.
define i64 @addv_v3i64(<3 x i64> %a) {
; CHECK-SD-LABEL: addv_v3i64:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
; CHECK-SD-NEXT: mov v2.d[1], xzr
; CHECK-SD-NEXT: add v0.2d, v0.2d, v2.2d
; CHECK-SD-NEXT: addp d0, v0.2d
; CHECK-SD-NEXT: fmov x0, d0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_v3i64:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: mov v2.d[1], xzr
; CHECK-GI-NEXT: add v0.2d, v0.2d, v2.2d
; CHECK-GI-NEXT: addp d0, v0.2d
; CHECK-GI-NEXT: fmov x0, d0
; CHECK-GI-NEXT: ret
entry:
  %arg1 = call i64 @llvm.vector.reduce.add.v3i64(<3 x i64> %a)
  ret i64 %arg1
}
; <4 x i64> spans two q registers: one vector add, then ADDP d0.
define i64 @addv_v4i64(<4 x i64> %a) {
; CHECK-LABEL: addv_v4i64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add v0.2d, v0.2d, v1.2d
; CHECK-NEXT: addp d0, v0.2d
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
entry:
  %arg1 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a)
  ret i64 %arg1
}
; <2 x i128> is split into scalar register pairs (x0:x1, x2:x3); the
; reduction becomes a 128-bit scalar add via adds/adc — no NEON involved.
define i128 @addv_v2i128(<2 x i128> %a) {
; CHECK-LABEL: addv_v2i128:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adds x0, x0, x2
; CHECK-NEXT: adc x1, x1, x3
; CHECK-NEXT: ret
entry:
  %arg1 = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
  ret i128 %arg1
}
; The i64 is masked to its low byte, so every lane except part of lane 0 is
; known zero. SDAG folds the whole reduction to a single ldrb; GISel still
; builds the vector and reduces it.
define i16 @addv_zero_lanes_v4i16(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v4i16:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldrb w0, [x0]
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v4i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldrb w8, [x0]
; CHECK-GI-NEXT: fmov d0, x8
; CHECK-GI-NEXT: addv h0, v0.4h
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i64, ptr %arr
  %and = and i64 %v, 255
  %vec = bitcast i64 %and to <4 x i16>
  %r = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %vec)
  ret i16 %r
}
; Mask 255 leaves only byte lane 0 possibly nonzero: SDAG folds the reduction
; to a single ldrb; GISel still emits an ADDV.8b.
define i8 @addv_zero_lanes_v8i8(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v8i8:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldrb w0, [x0]
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v8i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldrb w8, [x0]
; CHECK-GI-NEXT: fmov d0, x8
; CHECK-GI-NEXT: addv b0, v0.8b
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i64, ptr %arr
  %and = and i64 %v, 255
  %vec = bitcast i64 %and to <8 x i8>
  %r = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %vec)
  ret i8 %r
}
; Negative test: mask 256 (0x100) keeps a bit in byte lane 1, not lane 0, so
; the single-lane fold must NOT fire and the ADDV is kept by both selectors.
define i8 @addv_zero_lanes_negative_v8i8(ptr %arr) {
; CHECK-LABEL: addv_zero_lanes_negative_v8i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: and x8, x8, #0x100
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: addv b0, v0.8b
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
  %v = load i64, ptr %arr
  %and = and i64 %v, 256
  %vec = bitcast i64 %and to <8 x i8>
  %r = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %vec)
  ret i8 %r
}
; 128-bit variant of the known-zero-lanes case: only the low byte of the i128
; survives the mask, but neither selector folds the reduction to a plain load
; here — both insert the byte into a zeroed q register and run ADDV.16b.
define i8 @addv_zero_lanes_v16i8(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v16i8:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
; CHECK-SD-NEXT: ldrb w8, [x0]
; CHECK-SD-NEXT: mov v0.d[0], x8
; CHECK-SD-NEXT: addv b0, v0.16b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v16i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldrb w8, [x0]
; CHECK-GI-NEXT: mov v0.d[0], x8
; CHECK-GI-NEXT: mov v0.d[1], xzr
; CHECK-GI-NEXT: addv b0, v0.16b
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i128, ptr %arr
  %and = and i128 %v, 255
  %vec = bitcast i128 %and to <16 x i8>
  %r = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %vec)
  ret i8 %r
}
; i128 masked to its low 16 bits, viewed as <8 x i16>: both selectors load a
; halfword into a zeroed q register and reduce with ADDV.8h (fold not applied).
define i16 @addv_zero_lanes_v8i16(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v8i16:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
; CHECK-SD-NEXT: ldrh w8, [x0]
; CHECK-SD-NEXT: mov v0.d[0], x8
; CHECK-SD-NEXT: addv h0, v0.8h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v8i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldrh w8, [x0]
; CHECK-GI-NEXT: mov v0.d[0], x8
; CHECK-GI-NEXT: mov v0.d[1], xzr
; CHECK-GI-NEXT: addv h0, v0.8h
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i128, ptr %arr
  %and = and i128 %v, u0xFFFF
  %vec = bitcast i128 %and to <8 x i16>
  %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %vec)
  ret i16 %r
}
; i128 masked to its low 32 bits, viewed as <4 x i32>: both selectors load a
; word into a zeroed q register and reduce with ADDV.4s (fold not applied).
define i32 @addv_zero_lanes_v4i32(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v4i32:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
; CHECK-SD-NEXT: ldr w8, [x0]
; CHECK-SD-NEXT: mov v0.d[0], x8
; CHECK-SD-NEXT: addv s0, v0.4s
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v4i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr w8, [x0]
; CHECK-GI-NEXT: mov v0.d[0], x8
; CHECK-GI-NEXT: mov v0.d[1], xzr
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i128, ptr %arr
  %and = and i128 %v, u0xFFFFFFFF
  %vec = bitcast i128 %and to <4 x i32>
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %vec)
  ret i32 %r
}
; i64 masked to its low 32 bits, viewed as <2 x i32>: only lane 0 can be
; nonzero, so SDAG folds the reduction to a single 32-bit load; GISel still
; emits the ADDP.2s reduction.
define i32 @addv_zero_lanes_v2i32(ptr %arr) {
; CHECK-SD-LABEL: addv_zero_lanes_v2i32:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldr w0, [x0]
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: addv_zero_lanes_v2i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr w8, [x0]
; CHECK-GI-NEXT: fmov d0, x8
; CHECK-GI-NEXT: addp v0.2s, v0.2s, v0.2s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
  %v = load i64, ptr %arr
  %and = and i64 %v, u0xFFFFFFFF
  %vec = bitcast i64 %and to <2 x i32>
  %r = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %vec)
  ret i32 %r
}