; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s

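; Reduce <32 x i8>: fold the high 128-bit half into the low half with
; xvpermi.q + vor.v, then halve the active width with vbsrl.v shifts of
; 8, 4, 2 and 1 bytes before storing byte 0.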
define void @vec_reduce_or_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <32 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

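; Reduce <16 x i16>: same halving scheme, stopping at a 2-byte (one i16)
; shift and storing element 0 with vstelm.h.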
define void @vec_reduce_or_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <16 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}

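; Reduce <8 x i32>: two vbsrl.v folds (8 and 4 bytes) after combining the
; 128-bit halves, then store element 0 with vstelm.w.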
define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}

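; Reduce <4 x i64>: a single 8-byte vbsrl.v fold after combining the
; 128-bit halves, then store element 0 with vstelm.d.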
define void @vec_reduce_or_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_or_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i64>, ptr %src
  %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
  store i64 %res, ptr %dst
  ret void
}
