; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64
| |
; Full-width <16 x i8> unsigned-min reduction. Expected lowering is identical
; on LA32 and LA64: log2(16) = 4 rounds of vbsrl.v (vector shift right by
; 8/4/2/1 bytes) each paired with vmin.bu, then vstelm.b stores lane 0.
define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <16 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}
| |
; Sub-register <8 x i8> unsigned-min reduction. The 64-bit load differs per
; target: LA32 uses two ld.w + vinsgr2vr.w inserts, LA64 a single ld.d +
; vinsgr2vr.d. Both then do 3 shift/min rounds (4/2/1 bytes) and store lane 0.
define void @vec_reduce_umin_v8i8(ptr %src, ptr %dst) nounwind {
; LA32-LABEL: vec_reduce_umin_v8i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a2, $a0, 0
; LA32-NEXT:    ld.w $a0, $a0, 4
; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 0
; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA32-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 2
; LA32-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 1
; LA32-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA32-NEXT:    vstelm.b $vr0, $a1, 0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: vec_reduce_umin_v8i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA64-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 2
; LA64-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 1
; LA64-NEXT:    vmin.bu $vr0, $vr1, $vr0
; LA64-NEXT:    vstelm.b $vr0, $a1, 0, 0
; LA64-NEXT:    ret
  %v = load <8 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}
| |
; <4 x i8> unsigned-min reduction: a 32-bit scalar load inserted into lane 0,
; then 2 shift/min rounds (2 and 1 bytes). Same sequence on both targets.
define void @vec_reduce_umin_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}
| |
; Minimal <2 x i8> case: one halfword load, a single 1-byte shift + vmin.bu,
; then store element 0. Same sequence on both targets.
define void @vec_reduce_umin_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.h $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}
| |
; Full-width <8 x i16> unsigned-min reduction: 3 rounds of vbsrl.v (8/4/2
; bytes, i.e. halving the element count each time) with vmin.hu, then
; vstelm.h stores lane 0. Same on LA32 and LA64.
define void @vec_reduce_umin_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <8 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}
| |
; <4 x i16> unsigned-min reduction. As with v8i8, only the 64-bit load
; differs: LA32 uses two ld.w + word inserts, LA64 one ld.d + vinsgr2vr.d.
; Both then do 2 shift/min rounds (4 and 2 bytes) and store lane 0.
define void @vec_reduce_umin_v4i16(ptr %src, ptr %dst) nounwind {
; LA32-LABEL: vec_reduce_umin_v4i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a2, $a0, 0
; LA32-NEXT:    ld.w $a0, $a0, 4
; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 0
; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA32-NEXT:    vmin.hu $vr0, $vr1, $vr0
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 2
; LA32-NEXT:    vmin.hu $vr0, $vr1, $vr0
; LA32-NEXT:    vstelm.h $vr0, $a1, 0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: vec_reduce_umin_v4i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA64-NEXT:    vmin.hu $vr0, $vr1, $vr0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 2
; LA64-NEXT:    vmin.hu $vr0, $vr1, $vr0
; LA64-NEXT:    vstelm.h $vr0, $a1, 0, 0
; LA64-NEXT:    ret
  %v = load <4 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}
| |
; <2 x i16> unsigned-min reduction: word load into lane 0, one 2-byte shift
; + vmin.hu, store element 0. Same sequence on both targets.
define void @vec_reduce_umin_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umin_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}
| |
; Full-width <4 x i32> unsigned-min reduction: 2 rounds of vbsrl.v (8 and 4
; bytes) with vmin.wu. The reductions match; only the final scalar store
; differs — LA32 goes through vpickve2gr.w + st.w while LA64 uses vstelm.w.
define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind {
; LA32-LABEL: vec_reduce_umin_v4i32:
; LA32:       # %bb.0:
; LA32-NEXT:    vld $vr0, $a0, 0
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 8
; LA32-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA32-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA32-NEXT:    vpickve2gr.w $a0, $vr0, 0
; LA32-NEXT:    st.w $a0, $a1, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: vec_reduce_umin_v4i32:
; LA64:       # %bb.0:
; LA64-NEXT:    vld $vr0, $a0, 0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 8
; LA64-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA64-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA64-NEXT:    vstelm.w $vr0, $a1, 0, 0
; LA64-NEXT:    ret
  %v = load <4 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}
| |
; <2 x i32> unsigned-min reduction: 64-bit load (split on LA32, single ld.d
; on LA64), one 4-byte shift + vmin.wu, then the per-target scalar store
; (vpickve2gr.w + st.w on LA32, vstelm.w on LA64).
define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind {
; LA32-LABEL: vec_reduce_umin_v2i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a2, $a0, 0
; LA32-NEXT:    ld.w $a0, $a0, 4
; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 0
; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
; LA32-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA32-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA32-NEXT:    vpickve2gr.w $a0, $vr0, 0
; LA32-NEXT:    st.w $a0, $a1, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: vec_reduce_umin_v2i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 4
; LA64-NEXT:    vmin.wu $vr0, $vr1, $vr0
; LA64-NEXT:    vstelm.w $vr0, $a1, 0, 0
; LA64-NEXT:    ret
  %v = load <2 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}
| |
; <2 x i64> unsigned-min reduction. LA64 keeps it in vector form: one 8-byte
; shift + vmin.du + vstelm.d. LA32 has no 64-bit GPRs, so the i64 umin is
; scalarized: both elements are extracted as word pairs and compared via
; sltu/sltui with masknez/maskeqz selects (branchless 64-bit unsigned
; compare-and-select built from 32-bit halves), then stored as two words.
define void @vec_reduce_umin_v2i64(ptr %src, ptr %dst) nounwind {
; LA32-LABEL: vec_reduce_umin_v2i64:
; LA32:       # %bb.0:
; LA32-NEXT:    vld $vr0, $a0, 0
; LA32-NEXT:    vpickve2gr.w $a0, $vr0, 2
; LA32-NEXT:    vpickve2gr.w $a2, $vr0, 0
; LA32-NEXT:    vpickve2gr.w $a3, $vr0, 3
; LA32-NEXT:    vpickve2gr.w $a4, $vr0, 1
; LA32-NEXT:    sltu $a5, $a4, $a3
; LA32-NEXT:    xor $a6, $a4, $a3
; LA32-NEXT:    sltui $a6, $a6, 1
; LA32-NEXT:    masknez $a5, $a5, $a6
; LA32-NEXT:    sltu $a7, $a2, $a0
; LA32-NEXT:    maskeqz $a6, $a7, $a6
; LA32-NEXT:    or $a5, $a6, $a5
; LA32-NEXT:    masknez $a0, $a0, $a5
; LA32-NEXT:    maskeqz $a2, $a2, $a5
; LA32-NEXT:    or $a0, $a2, $a0
; LA32-NEXT:    masknez $a2, $a3, $a5
; LA32-NEXT:    maskeqz $a3, $a4, $a5
; LA32-NEXT:    or $a2, $a3, $a2
; LA32-NEXT:    st.w $a2, $a1, 4
; LA32-NEXT:    st.w $a0, $a1, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: vec_reduce_umin_v2i64:
; LA64:       # %bb.0:
; LA64-NEXT:    vld $vr0, $a0, 0
; LA64-NEXT:    vbsrl.v $vr1, $vr0, 8
; LA64-NEXT:    vmin.du $vr0, $vr1, $vr0
; LA64-NEXT:    vstelm.d $vr0, $a1, 0, 0
; LA64-NEXT:    ret
  %v = load <2 x i64>, ptr %src
  %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v)
  store i64 %res, ptr %dst
  ret void
}