; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
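; Tests lowering of llvm.vector.reduce.add for vector types that need
; legalization: single-element vectors, non-power-of-two element counts
; (v3, v9), illegal element types (i1, i24, i128), and a wide v16i32 case.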

declare i1 @llvm.vector.reduce.add.v1i1(<1 x i1> %a)
declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %a)
declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %a)
declare i24 @llvm.vector.reduce.add.v1i24(<1 x i24> %a)
declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %a)
declare i128 @llvm.vector.reduce.add.v1i128(<1 x i128> %a)

declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8> %a)
declare i8 @llvm.vector.reduce.add.v9i8(<9 x i8> %a)
declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %a)
declare i1 @llvm.vector.reduce.add.v4i1(<4 x i1> %a)
declare i24 @llvm.vector.reduce.add.v4i24(<4 x i24> %a)
declare i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %a)

define i1 @test_v1i1(<1 x i1> %a) nounwind {
; CHECK-LABEL: test_v1i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x1
; CHECK-NEXT:    ret
  %b = call i1 @llvm.vector.reduce.add.v1i1(<1 x i1> %a)
  ret i1 %b
}

define i8 @test_v1i8(<1 x i8> %a) nounwind {
; CHECK-LABEL: test_v1i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w0, v0.b[0]
; CHECK-NEXT:    ret
  %b = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %a)
  ret i8 %b
}

define i16 @test_v1i16(<1 x i16> %a) nounwind {
; CHECK-LABEL: test_v1i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w0, v0.h[0]
; CHECK-NEXT:    ret
  %b = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %a)
  ret i16 %b
}

define i24 @test_v1i24(<1 x i24> %a) nounwind {
; CHECK-LABEL: test_v1i24:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call i24 @llvm.vector.reduce.add.v1i24(<1 x i24> %a)
  ret i24 %b
}

define i32 @test_v1i32(<1 x i32> %a) nounwind {
; CHECK-LABEL: test_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
  ret i32 %b
}

define i64 @test_v1i64(<1 x i64> %a) nounwind {
; CHECK-LABEL: test_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %b = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %a)
  ret i64 %b
}

define i128 @test_v1i128(<1 x i128> %a) nounwind {
; CHECK-LABEL: test_v1i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call i128 @llvm.vector.reduce.add.v1i128(<1 x i128> %a)
  ret i128 %b
}

define i8 @test_v3i8(<3 x i8> %a) nounwind {
; CHECK-LABEL: test_v3i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v0.2d, #0000000000000000
; CHECK-NEXT:    mov v0.h[0], w0
; CHECK-NEXT:    mov v0.h[1], w1
; CHECK-NEXT:    mov v0.h[2], w2
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i8 @llvm.vector.reduce.add.v3i8(<3 x i8> %a)
  ret i8 %b
}

define i8 @test_v9i8(<9 x i8> %a) nounwind {
; CHECK-LABEL: test_v9i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, .LCPI8_0
; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI8_0]
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    addv b0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i8 @llvm.vector.reduce.add.v9i8(<9 x i8> %a)
  ret i8 %b
}

define i32 @test_v3i32(<3 x i32> %a) nounwind {
; CHECK-LABEL: test_v3i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov v0.s[3], wzr
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %a)
  ret i32 %b
}

define i1 @test_v4i1(<4 x i1> %a) nounwind {
; CHECK-LABEL: test_v4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    and w0, w8, #0x1
; CHECK-NEXT:    ret
  %b = call i1 @llvm.vector.reduce.add.v4i1(<4 x i1> %a)
  ret i1 %b
}

define i24 @test_v4i24(<4 x i24> %a) nounwind {
; CHECK-LABEL: test_v4i24:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i24 @llvm.vector.reduce.add.v4i24(<4 x i24> %a)
  ret i24 %b
}

define i128 @test_v2i128(<2 x i128> %a) nounwind {
; CHECK-LABEL: test_v2i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adc x1, x1, x3
; CHECK-NEXT:    ret
  %b = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
  ret i128 %b
}

define i32 @test_v16i32(<16 x i32> %a) nounwind {
; CHECK-LABEL: test_v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v1.4s, v1.4s, v3.4s
; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %a)
  ret i32 %b
}