; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,RV64

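; Check that a ctpop of an iN value produced by bitcasting an <N x i1>
; mask vector is selected as a single vcpop.m on the mask rather than
; being expanded to scalar popcount code.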
define i2 @test_v2i1(<2 x i1> %x) {
; CHECK-LABEL: test_v2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <2 x i1> %x to i2
  %b = call i2 @llvm.ctpop.i2(i2 %a)
  ret i2 %b
}

define i4 @test_v4i1(<4 x i1> %x) {
; CHECK-LABEL: test_v4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <4 x i1> %x to i4
  %b = call i4 @llvm.ctpop.i4(i4 %a)
  ret i4 %b
}

define i8 @test_v8i1(<8 x i1> %x) {
; CHECK-LABEL: test_v8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <8 x i1> %x to i8
  %b = call i8 @llvm.ctpop.i8(i8 %a)
  ret i8 %b
}

define i16 @test_v16i1(<16 x i1> %x) {
; CHECK-LABEL: test_v16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <16 x i1> %x to i16
  %b = call i16 @llvm.ctpop.i16(i16 %a)
  ret i16 %b
}

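; A VL of 32 no longer fits in vsetivli's 5-bit immediate, so it has to be
; materialized into a register for vsetvli.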
define i32 @test_v32i1(<32 x i1> %x) {
; CHECK-LABEL: test_v32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <32 x i1> %x to i32
  %b = call i32 @llvm.ctpop.i32(i32 %a)
  ret i32 %b
}

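; vcpop.m counts at most 64 bits here, so on RV32 the upper half of the
; i64 result is known zero and is produced with li a1, 0.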
define i64 @test_v64i1(<64 x i1> %x) {
; RV32-LABEL: test_v64i1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 64
; RV32-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; RV32-NEXT:    vcpop.m a0, v0
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_v64i1:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a0, 64
; RV64-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; RV64-NEXT:    vcpop.m a0, v0
; RV64-NEXT:    ret
entry:
  %a = bitcast <64 x i1> %x to i64
  %b = call i64 @llvm.ctpop.i64(i64 %a)
  ret i64 %b
}

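; i128 is wider than 2*XLEN on RV32, so it is returned indirectly through
; the pointer in a0; on RV64 it comes back in a0/a1 with the high half
; zeroed.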
define i128 @test_v128i1(<128 x i1> %x) {
; RV32-LABEL: test_v128i1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a1, 128
; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    sw zero, 4(a0)
; RV32-NEXT:    sw zero, 8(a0)
; RV32-NEXT:    sw zero, 12(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: test_v128i1:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a0, 128
; RV64-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; RV64-NEXT:    vcpop.m a0, v0
; RV64-NEXT:    li a1, 0
; RV64-NEXT:    ret
entry:
  %a = bitcast <128 x i1> %x to i128
  %b = call i128 @llvm.ctpop.i128(i128 %a)
  ret i128 %b
}

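; Truncating the count to i32 leaves only the low bits live, so RV32 and
; RV64 lower identically and share the common CHECK prefix.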
define i32 @test_trunc_v128i1(<128 x i1> %x) {
; CHECK-LABEL: test_trunc_v128i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a0, 128
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = bitcast <128 x i1> %x to i128
  %b = call i128 @llvm.ctpop.i128(i128 %a)
  %c = trunc i128 %b to i32
  ret i32 %c
}

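; <256 x i1> is passed split across v0 and v8. Each half is read back as
; e64 elements (with extra vsrl.vx shifts for the high words on RV32),
; counted with the scalar Zbb cpop, and the partial counts are summed with
; explicit carry propagation to form the wide result.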
define i256 @test_v256i1(<256 x i1> %x) {
; RV32-LABEL: test_v256i1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v0, 1
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vslidedown.vi v10, v8, 1
; RV32-NEXT:    vmv.x.s a2, v0
; RV32-NEXT:    vmv.x.s a3, v8
; RV32-NEXT:    vsrl.vx v11, v9, a1
; RV32-NEXT:    vsrl.vx v12, v0, a1
; RV32-NEXT:    vmv.x.s a4, v9
; RV32-NEXT:    vsrl.vx v9, v10, a1
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v10
; RV32-NEXT:    cpop a3, a3
; RV32-NEXT:    cpop a2, a2
; RV32-NEXT:    vmv.x.s a5, v11
; RV32-NEXT:    vmv.x.s a6, v12
; RV32-NEXT:    vmv.x.s a7, v9
; RV32-NEXT:    vmv.x.s t0, v8
; RV32-NEXT:    cpop a1, a1
; RV32-NEXT:    cpop a4, a4
; RV32-NEXT:    cpop t0, t0
; RV32-NEXT:    cpop a7, a7
; RV32-NEXT:    cpop a6, a6
; RV32-NEXT:    cpop a5, a5
; RV32-NEXT:    add a3, a3, t0
; RV32-NEXT:    add a1, a1, a7
; RV32-NEXT:    add a2, a2, a6
; RV32-NEXT:    add a4, a4, a5
; RV32-NEXT:    add a5, a3, a1
; RV32-NEXT:    add a6, a2, a4
; RV32-NEXT:    add a1, a6, a5
; RV32-NEXT:    sltu a3, a5, a3
; RV32-NEXT:    sltu a4, a6, a2
; RV32-NEXT:    sltu a2, a1, a6
; RV32-NEXT:    add a3, a4, a3
; RV32-NEXT:    add a3, a3, a2
; RV32-NEXT:    beq a3, a4, .LBB8_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    sltu a2, a3, a4
; RV32-NEXT:  .LBB8_2: # %entry
; RV32-NEXT:    sw zero, 16(a0)
; RV32-NEXT:    sw zero, 20(a0)
; RV32-NEXT:    sw zero, 24(a0)
; RV32-NEXT:    sw zero, 28(a0)
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    sw a3, 4(a0)
; RV32-NEXT:    sw a2, 8(a0)
; RV32-NEXT:    sw zero, 12(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: test_v256i1:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vslidedown.vi v9, v0, 1
; RV64-NEXT:    vmv.x.s a1, v0
; RV64-NEXT:    vslidedown.vi v10, v8, 1
; RV64-NEXT:    vmv.x.s a2, v8
; RV64-NEXT:    vmv.x.s a3, v9
; RV64-NEXT:    vmv.x.s a4, v10
; RV64-NEXT:    cpop a2, a2
; RV64-NEXT:    cpop a1, a1
; RV64-NEXT:    cpop a4, a4
; RV64-NEXT:    cpop a3, a3
; RV64-NEXT:    add a2, a2, a4
; RV64-NEXT:    add a1, a1, a3
; RV64-NEXT:    add a2, a1, a2
; RV64-NEXT:    sltu a1, a2, a1
; RV64-NEXT:    sd a2, 0(a0)
; RV64-NEXT:    sd a1, 8(a0)
; RV64-NEXT:    sd zero, 16(a0)
; RV64-NEXT:    sd zero, 24(a0)
; RV64-NEXT:    ret
entry:
  %a = bitcast <256 x i1> %x to i256
  %b = call i256 @llvm.ctpop.i256(i256 %a)
  ret i256 %b
}

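; Same as test_v256i1, but the truncation to i32 kills the carry/overflow
; computation, leaving just the sum of the scalar cpop results.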
define i32 @test_trunc_v256i1(<256 x i1> %x) {
; RV32-LABEL: test_trunc_v256i1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v0, 1
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vslidedown.vi v10, v8, 1
; RV32-NEXT:    vmv.x.s a1, v0
; RV32-NEXT:    vmv.x.s a2, v8
; RV32-NEXT:    vsrl.vx v11, v9, a0
; RV32-NEXT:    vsrl.vx v12, v0, a0
; RV32-NEXT:    vmv.x.s a3, v9
; RV32-NEXT:    vsrl.vx v9, v10, a0
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    cpop a2, a2
; RV32-NEXT:    cpop a1, a1
; RV32-NEXT:    vmv.x.s a4, v11
; RV32-NEXT:    vmv.x.s a5, v12
; RV32-NEXT:    vmv.x.s a6, v9
; RV32-NEXT:    vmv.x.s a7, v8
; RV32-NEXT:    cpop a0, a0
; RV32-NEXT:    cpop a3, a3
; RV32-NEXT:    cpop a7, a7
; RV32-NEXT:    cpop a6, a6
; RV32-NEXT:    cpop a5, a5
; RV32-NEXT:    cpop a4, a4
; RV32-NEXT:    add a2, a2, a7
; RV32-NEXT:    add a0, a0, a6
; RV32-NEXT:    add a1, a1, a5
; RV32-NEXT:    add a3, a3, a4
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    add a1, a1, a3
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_trunc_v256i1:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vslidedown.vi v9, v0, 1
; RV64-NEXT:    vmv.x.s a0, v0
; RV64-NEXT:    vslidedown.vi v10, v8, 1
; RV64-NEXT:    vmv.x.s a1, v8
; RV64-NEXT:    vmv.x.s a2, v9
; RV64-NEXT:    vmv.x.s a3, v10
; RV64-NEXT:    cpop a1, a1
; RV64-NEXT:    cpop a0, a0
; RV64-NEXT:    cpop a3, a3
; RV64-NEXT:    cpop a2, a2
; RV64-NEXT:    add a1, a1, a3
; RV64-NEXT:    add a0, a0, a2
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    ret
entry:
  %a = bitcast <256 x i1> %x to i256
  %b = call i256 @llvm.ctpop.i256(i256 %a)
  %c = trunc i256 %b to i32
  ret i32 %c
}