; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64

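; This file tests that the @llvm.loongarch.lsx.vbitrev.* and
; @llvm.loongarch.lsx.vbitrevi.* intrinsics select the corresponding LSX
; vbitrev.{b/h/w/d} and vbitrevi.{b/h/w/d} instructions, which flip the bit
; selected by the second operand (taken modulo the element width) in each
; element of the first. The .d variants lower differently on LA32; see the
; comments above those tests.
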
declare <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vbitrev_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vbitrev_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrev.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vbitrev_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vbitrev_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrev.h $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vbitrev_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vbitrev_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrev.w $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64>, <2 x i64>)

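; On LA32 the i64-element variant is not matched to a single instruction; as
; the checks below show, it is expanded to %va ^ (1 << (%vb & 63)) using
; vrepli.d/vand.v/vsll.d/vxor.v. LA64 selects vbitrev.d directly.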
define <2 x i64> @lsx_vbitrev_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
; LA32-LABEL: lsx_vbitrev_d:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    vrepli.d $vr2, 63
; LA32-NEXT:    vand.v $vr1, $vr1, $vr2
; LA32-NEXT:    vrepli.d $vr2, 1
; LA32-NEXT:    vsll.d $vr1, $vr2, $vr1
; LA32-NEXT:    vxor.v $vr0, $vr0, $vr1
; LA32-NEXT:    ret
;
; LA64-LABEL: lsx_vbitrev_d:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    vbitrev.d $vr0, $vr0, $vr1
; LA64-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}

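; The vbitrevi.* forms take the bit position as an immediate. Each test below
; uses the largest immediate valid for its element width (7, 15, 31 and 63).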
declare <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8>, i32)

define <16 x i8> @lsx_vbitrevi_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vbitrevi_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrevi.b $vr0, $vr0, 7
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8> %va, i32 7)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16>, i32)

define <8 x i16> @lsx_vbitrevi_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vbitrevi_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrevi.h $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16> %va, i32 15)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32>, i32)

define <4 x i32> @lsx_vbitrevi_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vbitrevi_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitrevi.w $vr0, $vr0, 31
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32> %va, i32 31)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64>, i32)

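; With immediate 63, flipping the top bit is an XOR with a splat constant
; (presumably 1 << 63 per element, materialized from constant pool .LCPI7_0),
; so LA32 emits a vld plus a single vxor.v instead of vbitrevi.d. LA64 still
; selects vbitrevi.d directly.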
define <2 x i64> @lsx_vbitrevi_d(<2 x i64> %va) nounwind {
; LA32-LABEL: lsx_vbitrevi_d:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA32-NEXT:    vld $vr1, $a0, %pc_lo12(.LCPI7_0)
; LA32-NEXT:    vxor.v $vr0, $vr0, $vr1
; LA32-NEXT:    ret
;
; LA64-LABEL: lsx_vbitrevi_d:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    vbitrevi.d $vr0, $vr0, 63
; LA64-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64> %va, i32 63)
  ret <2 x i64> %res
}