; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -start-before=codegenprepare | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

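; A vector compare produces an <N x i1> mask; sign-extending the mask and
; summing the lanes with llvm.vector.reduce.add yields minus the number of
; true lanes. These tests check that the pattern lowers to cmgt (compare),
; addv (across-vector add) and smov (sign-extending lane move) on AArch64.
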
define i32 @vmask_reduce_i32_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: vmask_reduce_i32_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.8b, v1.8b, v0.8b
; CHECK-NEXT:    addv b0, v0.8b
; CHECK-NEXT:    smov w0, v0.b[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i8> %a, %b
  %t1 = sext <8 x i1> %mask to <8 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %t1)
  %t3 = sext i8 %t2 to i32
  ret i32 %t3
}

define i32 @vmask_reduce_i32_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vmask_reduce_i32_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    addv b0, v0.16b
; CHECK-NEXT:    smov w0, v0.b[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <16 x i8> %a, %b
  %t1 = sext <16 x i1> %mask to <16 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %t1)
  %t3 = sext i8 %t2 to i32
  ret i32 %t3
}

define i32 @vmask_reduce_i32_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: vmask_reduce_i32_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.4h, v1.4h, v0.4h
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    smov w0, v0.h[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <4 x i16> %a, %b
  %t1 = sext <4 x i1> %mask to <4 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %t1)
  %t3 = sext i16 %t2 to i32
  ret i32 %t3
}

define i32 @vmask_reduce_i32_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vmask_reduce_i32_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.8h, v1.8h, v0.8h
; CHECK-NEXT:    addv h0, v0.8h
; CHECK-NEXT:    smov w0, v0.h[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i16> %a, %b
  %t1 = sext <8 x i1> %mask to <8 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %t1)
  %t3 = sext i16 %t2 to i32
  ret i32 %t3
}

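; ADDV has no .2s arrangement, so the two-lane i32 reduction uses a pairwise
; addp instead.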
define i64 @vmask_reduce_i64_v2i32(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: vmask_reduce_i64_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.2s, v1.2s, v0.2s
; CHECK-NEXT:    addp v0.2s, v0.2s, v0.2s
; CHECK-NEXT:    smov x0, v0.s[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <2 x i32> %a, %b
  %t1 = sext <2 x i1> %mask to <2 x i32>
  %t2 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %t1)
  %t3 = sext i32 %t2 to i64
  ret i64 %t3
}

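; The i64 variants follow the same pattern; smov sign-extends the reduced
; lane directly into a 64-bit x register, so no separate extend is needed.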
define i64 @vmask_reduce_i64_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: vmask_reduce_i64_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.8b, v1.8b, v0.8b
; CHECK-NEXT:    addv b0, v0.8b
; CHECK-NEXT:    smov x0, v0.b[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i8> %a, %b
  %t1 = sext <8 x i1> %mask to <8 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %t1)
  %t3 = sext i8 %t2 to i64
  ret i64 %t3
}

define i64 @vmask_reduce_i64_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vmask_reduce_i64_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    addv b0, v0.16b
; CHECK-NEXT:    smov x0, v0.b[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <16 x i8> %a, %b
  %t1 = sext <16 x i1> %mask to <16 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %t1)
  %t3 = sext i8 %t2 to i64
  ret i64 %t3
}

define i64 @vmask_reduce_i64_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: vmask_reduce_i64_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.4h, v1.4h, v0.4h
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    smov x0, v0.h[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <4 x i16> %a, %b
  %t1 = sext <4 x i1> %mask to <4 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %t1)
  %t3 = sext i16 %t2 to i64
  ret i64 %t3
}

define i64 @vmask_reduce_i64_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vmask_reduce_i64_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.8h, v1.8h, v0.8h
; CHECK-NEXT:    addv h0, v0.8h
; CHECK-NEXT:    smov x0, v0.h[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <8 x i16> %a, %b
  %t1 = sext <8 x i1> %mask to <8 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %t1)
  %t3 = sext i16 %t2 to i64
  ret i64 %t3
}

; TODO: We should use a saddlv here to avoid the smov.
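; A possible sequence (sketch, not what is currently generated):
;   cmgt   v0.4s, v1.4s, v0.4s
;   saddlv d0, v0.4s
;   fmov   x0, d0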
define i64 @vmask_reduce_i64_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vmask_reduce_i64_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.4s, v1.4s, v0.4s
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    smov x0, v0.s[0]
; CHECK-NEXT:    ret
  %mask = icmp slt <4 x i32> %a, %b
  %t1 = sext <4 x i1> %mask to <4 x i32>
  %t2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %t1)
  %t3 = sext i32 %t2 to i64
  ret i64 %t3
}

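; The sum of the sign-extended mask lanes is -popcount(mask), so the popcount
; variants negate the reduced value; this shows up as a trailing neg after
; the smov.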
define i32 @vmask_popcount_i32_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vmask_popcount_i32_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    addv b0, v0.16b
; CHECK-NEXT:    smov w8, v0.b[0]
; CHECK-NEXT:    neg w0, w8
; CHECK-NEXT:    ret
  %mask = icmp slt <16 x i8> %a, %b
  %t1 = sext <16 x i1> %mask to <16 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %t1)
  %t3 = sext i8 %t2 to i32
  %t4 = sub i32 0, %t3
  ret i32 %t4
}

define i32 @vmask_popcount_i32_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: vmask_popcount_i32_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.4h, v1.4h, v0.4h
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    smov w8, v0.h[0]
; CHECK-NEXT:    neg w0, w8
; CHECK-NEXT:    ret
  %mask = icmp slt <4 x i16> %a, %b
  %t1 = sext <4 x i1> %mask to <4 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %t1)
  %t3 = sext i16 %t2 to i32
  %t4 = sub i32 0, %t3
  ret i32 %t4
}

define i64 @vmask_popcount_i64_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vmask_popcount_i64_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    addv b0, v0.16b
; CHECK-NEXT:    smov x8, v0.b[0]
; CHECK-NEXT:    neg x0, x8
; CHECK-NEXT:    ret
  %mask = icmp slt <16 x i8> %a, %b
  %t1 = sext <16 x i1> %mask to <16 x i8>
  %t2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %t1)
  %t3 = sext i8 %t2 to i64
  %t4 = sub i64 0, %t3
  ret i64 %t4
}

define i64 @vmask_popcount_i64_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: vmask_popcount_i64_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmgt v0.4h, v1.4h, v0.4h
; CHECK-NEXT:    addv h0, v0.4h
; CHECK-NEXT:    smov x8, v0.h[0]
; CHECK-NEXT:    neg x0, x8
; CHECK-NEXT:    ret
  %mask = icmp slt <4 x i16> %a, %b
  %t1 = sext <4 x i1> %mask to <4 x i16>
  %t2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %t1)
  %t3 = sext i16 %t2 to i64
  %t4 = sub i64 0, %t3
  ret i64 %t4
}