| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefix=CHECK-SVE |
| ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE2 |
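; These tests check the lowering of llvm.vector.partial.reduce.add when the
; wide input is a sign- or zero-extended vector: plain SVE must unpack the
; input (sunpklo/sunpkhi or uunpklo/uunpkhi) and add each half into the
; accumulator, while SVE2 can fold the extend into the widening adds
; saddwb/saddwt (signed) and uaddwb/uaddwt (unsigned).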
| |
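; Sign-extended i32 -> i64: SVE unpacks both halves and adds; SVE2 uses
; saddwb/saddwt directly on the narrow input.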
define <vscale x 2 x i64> @signed_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input) {
| ; CHECK-SVE-LABEL: signed_wide_add_nxv4i32: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: sunpklo z2.d, z1.s |
| ; CHECK-SVE-NEXT: sunpkhi z1.d, z1.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z1.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: signed_wide_add_nxv4i32: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: saddwb z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: saddwt z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = sext <vscale x 4 x i32> %input to <vscale x 4 x i64> |
| %partial.reduce = tail call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide) |
| ret <vscale x 2 x i64> %partial.reduce |
| } |
| |
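; Zero-extended i32 -> i64: as above, but with the unsigned unpacks and
; uaddwb/uaddwt.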
define <vscale x 2 x i64> @unsigned_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input) {
| ; CHECK-SVE-LABEL: unsigned_wide_add_nxv4i32: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: uunpklo z2.d, z1.s |
| ; CHECK-SVE-NEXT: uunpkhi z1.d, z1.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z1.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: unsigned_wide_add_nxv4i32: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: uaddwb z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: uaddwt z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = zext <vscale x 4 x i32> %input to <vscale x 4 x i64> |
| %partial.reduce = tail call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide) |
| ret <vscale x 2 x i64> %partial.reduce |
| } |
| |
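; Sign-extended i16 -> i32.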
define <vscale x 4 x i32> @signed_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input) {
| ; CHECK-SVE-LABEL: signed_wide_add_nxv8i16: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: sunpklo z2.s, z1.h |
| ; CHECK-SVE-NEXT: sunpkhi z1.s, z1.h |
| ; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s |
| ; CHECK-SVE-NEXT: add z0.s, z0.s, z1.s |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: signed_wide_add_nxv8i16: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: saddwb z0.s, z0.s, z1.h |
| ; CHECK-SVE2-NEXT: saddwt z0.s, z0.s, z1.h |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = sext <vscale x 8 x i16> %input to <vscale x 8 x i32> |
| %partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide) |
| ret <vscale x 4 x i32> %partial.reduce |
| } |
| |
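; Zero-extended i16 -> i32.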
define <vscale x 4 x i32> @unsigned_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input) {
| ; CHECK-SVE-LABEL: unsigned_wide_add_nxv8i16: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: uunpklo z2.s, z1.h |
| ; CHECK-SVE-NEXT: uunpkhi z1.s, z1.h |
| ; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s |
| ; CHECK-SVE-NEXT: add z0.s, z0.s, z1.s |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: unsigned_wide_add_nxv8i16: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: uaddwb z0.s, z0.s, z1.h |
| ; CHECK-SVE2-NEXT: uaddwt z0.s, z0.s, z1.h |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = zext <vscale x 8 x i16> %input to <vscale x 8 x i32> |
| %partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide) |
| ret <vscale x 4 x i32> %partial.reduce |
| } |
| |
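; Sign-extended i8 -> i16.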
define <vscale x 8 x i16> @signed_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input) {
| ; CHECK-SVE-LABEL: signed_wide_add_nxv16i8: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: sunpklo z2.h, z1.b |
| ; CHECK-SVE-NEXT: sunpkhi z1.h, z1.b |
| ; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h |
| ; CHECK-SVE-NEXT: add z0.h, z0.h, z1.h |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: signed_wide_add_nxv16i8: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: saddwb z0.h, z0.h, z1.b |
| ; CHECK-SVE2-NEXT: saddwt z0.h, z0.h, z1.b |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = sext <vscale x 16 x i8> %input to <vscale x 16 x i16> |
| %partial.reduce = tail call <vscale x 8 x i16> @llvm.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide) |
| ret <vscale x 8 x i16> %partial.reduce |
| } |
| |
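; Zero-extended i8 -> i16.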
define <vscale x 8 x i16> @unsigned_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input) {
| ; CHECK-SVE-LABEL: unsigned_wide_add_nxv16i8: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: uunpklo z2.h, z1.b |
| ; CHECK-SVE-NEXT: uunpkhi z1.h, z1.b |
| ; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h |
| ; CHECK-SVE-NEXT: add z0.h, z0.h, z1.h |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: unsigned_wide_add_nxv16i8: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: uaddwb z0.h, z0.h, z1.b |
| ; CHECK-SVE2-NEXT: uaddwt z0.h, z0.h, z1.b |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = zext <vscale x 16 x i8> %input to <vscale x 16 x i16> |
| %partial.reduce = tail call <vscale x 8 x i16> @llvm.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide) |
| ret <vscale x 8 x i16> %partial.reduce |
| } |
| |
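; The unpacked nxv4i16 input is first sign-extended in-register (sxth) to
; i32 elements before the widening adds.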
define <vscale x 2 x i32> @signed_wide_add_nxv4i16(<vscale x 2 x i32> %acc, <vscale x 4 x i16> %input) {
| ; CHECK-SVE-LABEL: signed_wide_add_nxv4i16: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: ptrue p0.s |
| ; CHECK-SVE-NEXT: sxth z1.s, p0/m, z1.s |
| ; CHECK-SVE-NEXT: uunpklo z2.d, z1.s |
| ; CHECK-SVE-NEXT: uunpkhi z1.d, z1.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: signed_wide_add_nxv4i16: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: ptrue p0.s |
| ; CHECK-SVE2-NEXT: sxth z1.s, p0/m, z1.s |
| ; CHECK-SVE2-NEXT: saddwb z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: saddwt z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = sext <vscale x 4 x i16> %input to <vscale x 4 x i32> |
| %partial.reduce = tail call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv4i32(<vscale x 2 x i32> %acc, <vscale x 4 x i32> %input.wide) |
| ret <vscale x 2 x i32> %partial.reduce |
| } |
| |
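; The unpacked nxv4i16 input is first zero-extended in-register (and with
; 0xffff) before the widening adds.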
define <vscale x 2 x i32> @unsigned_wide_add_nxv4i16(<vscale x 2 x i32> %acc, <vscale x 4 x i16> %input) {
| ; CHECK-SVE-LABEL: unsigned_wide_add_nxv4i16: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: and z1.s, z1.s, #0xffff |
| ; CHECK-SVE-NEXT: uunpklo z2.d, z1.s |
| ; CHECK-SVE-NEXT: uunpkhi z1.d, z1.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: unsigned_wide_add_nxv4i16: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: and z1.s, z1.s, #0xffff |
| ; CHECK-SVE2-NEXT: uaddwb z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: uaddwt z0.d, z0.d, z1.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = zext <vscale x 4 x i16> %input to <vscale x 4 x i32> |
| %partial.reduce = tail call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv4i32(<vscale x 2 x i32> %acc, <vscale x 4 x i32> %input.wide) |
| ret <vscale x 2 x i32> %partial.reduce |
| } |
| |
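; The input and accumulator each span two Z registers, so the lowering is
; applied once per register pair.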
define <vscale x 4 x i64> @signed_wide_add_nxv8i32(<vscale x 4 x i64> %acc, <vscale x 8 x i32> %input) {
| ; CHECK-SVE-LABEL: signed_wide_add_nxv8i32: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: sunpklo z4.d, z3.s |
| ; CHECK-SVE-NEXT: sunpklo z5.d, z2.s |
| ; CHECK-SVE-NEXT: sunpkhi z3.d, z3.s |
| ; CHECK-SVE-NEXT: sunpkhi z2.d, z2.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z5.d |
| ; CHECK-SVE-NEXT: add z1.d, z1.d, z4.d |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z1.d, z1.d, z3.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: signed_wide_add_nxv8i32: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: saddwb z1.d, z1.d, z3.s |
| ; CHECK-SVE2-NEXT: saddwb z0.d, z0.d, z2.s |
| ; CHECK-SVE2-NEXT: saddwt z1.d, z1.d, z3.s |
| ; CHECK-SVE2-NEXT: saddwt z0.d, z0.d, z2.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = sext <vscale x 8 x i32> %input to <vscale x 8 x i64> |
| %partial.reduce = tail call <vscale x 4 x i64> @llvm.vector.partial.reduce.add.nxv4i64.nxv8i64(<vscale x 4 x i64> %acc, <vscale x 8 x i64> %input.wide) |
| ret <vscale x 4 x i64> %partial.reduce |
| } |
| |
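; As above, but zero-extended: one uaddwb/uaddwt pair per register pair.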
define <vscale x 4 x i64> @unsigned_wide_add_nxv8i32(<vscale x 4 x i64> %acc, <vscale x 8 x i32> %input) {
| ; CHECK-SVE-LABEL: unsigned_wide_add_nxv8i32: |
| ; CHECK-SVE: // %bb.0: // %entry |
| ; CHECK-SVE-NEXT: uunpklo z4.d, z3.s |
| ; CHECK-SVE-NEXT: uunpklo z5.d, z2.s |
| ; CHECK-SVE-NEXT: uunpkhi z3.d, z3.s |
| ; CHECK-SVE-NEXT: uunpkhi z2.d, z2.s |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z5.d |
| ; CHECK-SVE-NEXT: add z1.d, z1.d, z4.d |
| ; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d |
| ; CHECK-SVE-NEXT: add z1.d, z1.d, z3.d |
| ; CHECK-SVE-NEXT: ret |
| ; |
| ; CHECK-SVE2-LABEL: unsigned_wide_add_nxv8i32: |
| ; CHECK-SVE2: // %bb.0: // %entry |
| ; CHECK-SVE2-NEXT: uaddwb z1.d, z1.d, z3.s |
| ; CHECK-SVE2-NEXT: uaddwb z0.d, z0.d, z2.s |
| ; CHECK-SVE2-NEXT: uaddwt z1.d, z1.d, z3.s |
| ; CHECK-SVE2-NEXT: uaddwt z0.d, z0.d, z2.s |
| ; CHECK-SVE2-NEXT: ret |
| entry: |
| %input.wide = zext <vscale x 8 x i32> %input to <vscale x 8 x i64> |
| %partial.reduce = tail call <vscale x 4 x i64> @llvm.vector.partial.reduce.add.nxv4i64.nxv8i64(<vscale x 4 x i64> %acc, <vscale x 8 x i64> %input.wide) |
| ret <vscale x 4 x i64> %partial.reduce |
| } |