| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s |
| |
| declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1 immarg) |
| declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) |
| |
| ; Sum-of-absolute-differences (SAD) pattern with ZERO-extended inputs:      |
| ; two <16 x i8> vectors are widened to <16 x i32>, subtracted, run through  |
| ; llvm.abs, and add-reduced to a scalar i32.                                |
| ; The CHECK lines (autogenerated, see NOTE at top of file) verify that the  |
| ; backend narrows this back to the native widening-absolute-difference      |
| ; sequence uabdl/uabal2 followed by a uaddlv reduction, instead of doing    |
| ; the arithmetic at the widened i32 element width.                          |
| define i32 @test_sad_v16i8_zext(i8* nocapture readonly %a, i8* nocapture readonly %b) { |
| ; CHECK-LABEL: test_sad_v16i8_zext: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: ldr q1, [x1] |
| ; CHECK-NEXT: uabdl v2.8h, v1.8b, v0.8b |
| ; CHECK-NEXT: uabal2 v2.8h, v1.16b, v0.16b |
| ; CHECK-NEXT: uaddlv s0, v2.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = bitcast i8* %a to <16 x i8>* |
| %1 = load <16 x i8>, <16 x i8>* %0 |
| ; Widen %a's elements unsigned (u8 -> u32). |
| %2 = zext <16 x i8> %1 to <16 x i32> |
| %3 = bitcast i8* %b to <16 x i8>* |
| %4 = load <16 x i8>, <16 x i8>* %3 |
| %5 = zext <16 x i8> %4 to <16 x i32> |
| ; Elementwise difference b[i] - a[i] in i32; may be negative, hence abs below. |
| %6 = sub nsw <16 x i32> %5, %2 |
| ; i1 true: result is poison if an element is INT_MIN (cannot happen for     |
| ; differences of zero-extended i8 values, which fit well inside i32).       |
| %7 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %6, i1 true) |
| ; Horizontal add of all 16 absolute differences -> scalar SAD. |
| %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7) |
| ret i32 %8 |
| } |
| |
| ; Same SAD pattern as above but with SIGN-extended inputs (s8 -> s32).      |
| ; The autogenerated CHECK lines verify the backend selects the signed       |
| ; widening-absolute-difference instructions sabdl/sabal2 (vs. the unsigned  |
| ; uabdl/uabal2 in the zext variant), again followed by a uaddlv reduction.  |
| define i32 @test_sad_v16i8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b) { |
| ; CHECK-LABEL: test_sad_v16i8_sext: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: ldr q0, [x0] |
| ; CHECK-NEXT: ldr q1, [x1] |
| ; CHECK-NEXT: sabdl v2.8h, v1.8b, v0.8b |
| ; CHECK-NEXT: sabal2 v2.8h, v1.16b, v0.16b |
| ; CHECK-NEXT: uaddlv s0, v2.8h |
| ; CHECK-NEXT: fmov w0, s0 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = bitcast i8* %a to <16 x i8>* |
| %1 = load <16 x i8>, <16 x i8>* %0 |
| ; Widen %a's elements signed (s8 -> s32). |
| %2 = sext <16 x i8> %1 to <16 x i32> |
| %3 = bitcast i8* %b to <16 x i8>* |
| %4 = load <16 x i8>, <16 x i8>* %3 |
| %5 = sext <16 x i8> %4 to <16 x i32> |
| ; Elementwise signed difference b[i] - a[i] in i32. |
| %6 = sub nsw <16 x i32> %5, %2 |
| ; i1 true: poison on INT_MIN input (cannot occur: sign-extended i8          |
| ; differences are bounded far below the i32 limits).                        |
| %7 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %6, i1 true) |
| ; Horizontal add of all 16 absolute differences -> scalar SAD. |
| %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7) |
| ret i32 %8 |
| } |