| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 |
| ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 |
| |
; The vbitclr.b intrinsic (clear, per byte lane, the bit indexed by %vb)
; should select to a single vbitclr.b instruction on both LA32 and LA64.
declare <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vbitclr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vbitclr_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclr.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}
| |
; Half-word variant: expects a single vbitclr.h on both targets.
declare <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vbitclr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vbitclr_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclr.h $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}
| |
; Word variant: expects a single vbitclr.w on both targets.
declare <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vbitclr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vbitclr_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclr.w $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}
| |
; Double-word variant. LA64 selects a single vbitclr.d, while LA32 emits an
; expanded sequence (mask shift amount to [0,63], build 1 << amt, then andn).
; NOTE(review): the LA32 expansion presumably stems from v2i64 operand
; legalization on the 32-bit target rather than a missing instruction --
; confirm against the LoongArch lowering code before relying on this shape.
declare <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vbitclr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
; LA32-LABEL: lsx_vbitclr_d:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    vrepli.d $vr2, 63
; LA32-NEXT:    vand.v $vr1, $vr1, $vr2
; LA32-NEXT:    vrepli.d $vr2, 1
; LA32-NEXT:    vsll.d $vr1, $vr2, $vr1
; LA32-NEXT:    vandn.v $vr0, $vr1, $vr0
; LA32-NEXT:    ret
;
; LA64-LABEL: lsx_vbitclr_d:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    vbitclr.d $vr0, $vr0, $vr1
; LA64-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}
| |
; Immediate form: clears a fixed bit index per lane. Tested at the maximum
; in-range immediate for the byte element size (7).
declare <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8>, i32)

define <16 x i8> @lsx_vbitclri_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vbitclri_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclri.b $vr0, $vr0, 7
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8> %va, i32 7)
  ret <16 x i8> %res
}
| |
; Immediate half-word form, maximum in-range immediate (15).
declare <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16>, i32)

define <8 x i16> @lsx_vbitclri_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vbitclri_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclri.h $vr0, $vr0, 15
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16> %va, i32 15)
  ret <8 x i16> %res
}
| |
; Immediate word form, maximum in-range immediate (31).
declare <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32>, i32)

define <4 x i32> @lsx_vbitclri_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vbitclri_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vbitclri.w $vr0, $vr0, 31
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32> %va, i32 31)
  ret <4 x i32> %res
}
| |
; Immediate double-word form, maximum in-range immediate (63). LA64 selects
; vbitclri.d directly; LA32 instead loads a constant mask from the constant
; pool and clears the bit with vand.v.
; NOTE(review): on LA32 the clear-bit-63 mask is presumably materialized as a
; .LCPI constant because a v2i64 splat immediate is not legal there -- confirm.
declare <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64>, i32)

define <2 x i64> @lsx_vbitclri_d(<2 x i64> %va) nounwind {
; LA32-LABEL: lsx_vbitclri_d:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA32-NEXT:    vld $vr1, $a0, %pc_lo12(.LCPI7_0)
; LA32-NEXT:    vand.v $vr0, $vr0, $vr1
; LA32-NEXT:    ret
;
; LA64-LABEL: lsx_vbitclri_d:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    vbitclri.d $vr0, $vr0, 63
; LA64-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64> %va, i32 63)
  ret <2 x i64> %res
}