; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s
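
; Each test loads a vector, widens it with an aarch64.neon.uaddlp intrinsic,
; and sums the result with llvm.vector.reduce.add; the pair should be selected
; as a single UADDLV instruction.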

declare <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
declare <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone

declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) nounwind readnone
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) nounwind readnone
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone
declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone
declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone

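; uaddlp <8 x i8> -> <4 x i16> plus reduce.add -> i16 folds to uaddlv h0, v0.8b.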
define i16 @uaddlv4h_from_v8i8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: uaddlv4h_from_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    uaddlv h0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
  ret i16 %tmp5
}

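; uaddlp <16 x i8> -> <8 x i16> plus reduce.add -> i16 folds to uaddlv h0, v0.16b.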
define i16 @uaddlv16b_from_v16i8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: uaddlv16b_from_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv h0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <16 x i8>, <16 x i8>* %A
  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
  %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
  ret i16 %tmp5
}

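; uaddlp <8 x i16> -> <4 x i32> plus reduce.add -> i32 folds to uaddlv s0, v0.8h.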
define i32 @uaddlv8h_from_v8i16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: uaddlv8h_from_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv s0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <8 x i16>, <8 x i16>* %A
  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
  ret i32 %tmp5
}

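; uaddlp <4 x i32> -> <2 x i64> plus reduce.add -> i64 folds to uaddlv d0, v0.4s.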
define i64 @uaddlv4s_from_v4i32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: uaddlv4s_from_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    uaddlv d0, v0.4s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i32>, <4 x i32>* %A
  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
  %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
  ret i64 %tmp5
}

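; uaddlp <4 x i16> -> <2 x i32> plus reduce.add -> i32 folds to uaddlv s0, v0.4h.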
define i32 @uaddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: uaddlv4h_from_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    uaddlv s0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
  %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
  ret i32 %tmp5
}