| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=aarch64-- -O2 -mattr=+neon < %s | FileCheck %s |
| |
| ; Unsigned rounding-halving add performed in widened i16 lanes: the CHECK |
| ; lines expect llc to fold zext + urhadd.v8i16 + trunc into one 8-bit urhadd. |
| define <8 x i8> @avgceil_u_i8_to_i16(<8 x i8> %a, <8 x i8> %b) { |
| ; CHECK-LABEL: avgceil_u_i8_to_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: urhadd v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: ret |
| %wide.a = zext <8 x i8> %a to <8 x i16> |
| %wide.b = zext <8 x i8> %b to <8 x i16> |
| %wide.avg = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %wide.a, <8 x i16> %wide.b) |
| %res = trunc <8 x i16> %wide.avg to <8 x i8> |
| ret <8 x i8> %res |
| } |
| |
| |
| ; Signed rounding-halving add performed in widened i16 lanes: the CHECK |
| ; lines expect llc to fold sext + srhadd.v8i16 + trunc into one 8-bit srhadd. |
| ; Renamed from test_avgceil_s so all four tests in this file follow the same |
| ; <op>_<sign>_i8_to_i16 naming scheme. |
| define <8 x i8> @avgceil_s_i8_to_i16(<8 x i8> %a, <8 x i8> %b) { |
| ; CHECK-LABEL: avgceil_s_i8_to_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: srhadd v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: ret |
| %a16 = sext <8 x i8> %a to <8 x i16> |
| %b16 = sext <8 x i8> %b to <8 x i16> |
| %avg16 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %a16, <8 x i16> %b16) |
| %res = trunc <8 x i16> %avg16 to <8 x i8> |
| ret <8 x i8> %res |
| } |
| |
| ; Unsigned (truncating) halving add performed in widened i16 lanes: the CHECK |
| ; lines expect llc to fold zext + uhadd.v8i16 + trunc into one 8-bit uhadd. |
| define <8 x i8> @avgfloor_u_i8_to_i16(<8 x i8> %a, <8 x i8> %b) { |
| ; CHECK-LABEL: avgfloor_u_i8_to_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: uhadd v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: ret |
| %wide.a = zext <8 x i8> %a to <8 x i16> |
| %wide.b = zext <8 x i8> %b to <8 x i16> |
| %wide.avg = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %wide.a, <8 x i16> %wide.b) |
| %res = trunc <8 x i16> %wide.avg to <8 x i8> |
| ret <8 x i8> %res |
| } |
| |
| ; Signed (truncating) halving add performed in widened i16 lanes: the CHECK |
| ; lines expect llc to fold sext + shadd.v8i16 + trunc into one 8-bit shadd. |
| ; Renamed from test_avgfloor_s so all four tests in this file follow the same |
| ; <op>_<sign>_i8_to_i16 naming scheme. |
| define <8 x i8> @avgfloor_s_i8_to_i16(<8 x i8> %a, <8 x i8> %b) { |
| ; CHECK-LABEL: avgfloor_s_i8_to_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: shadd v0.8b, v0.8b, v1.8b |
| ; CHECK-NEXT: ret |
| %a16 = sext <8 x i8> %a to <8 x i16> |
| %b16 = sext <8 x i8> %b to <8 x i16> |
| %avg16 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %a16, <8 x i16> %b16) |
| %res = trunc <8 x i16> %avg16 to <8 x i8> |
| ret <8 x i8> %res |
| } |
| |
| |