; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
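; Make sure the @llvm.uadd.sat.* saturating unsigned add intrinsics on 128-bit
; vectors are lowered to the LSX vsadd.bu/vsadd.hu/vsadd.wu/vsadd.du instructions.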

define <16 x i8> @vuadd_b(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vuadd_b:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsadd.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
  %ret = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %ret
}

define <8 x i16> @vuadd_h(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vuadd_h:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsadd.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
  %ret = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %ret
}

define <4 x i32> @vuadd_w(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vuadd_w:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsadd.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
  %ret = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %ret
}

define <2 x i64> @vuadd_d(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: vuadd_d:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsadd.du $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
  %ret = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %ret
}

declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)