| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,V |
| ; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,V |
| ; RUN: llc < %s -mtriple=riscv64 -mattr=+zve32x,+zvl128b,+zbb | FileCheck %s --check-prefixes=CHECK,ZVE |
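
; The sum of a zero-extended i1 vector is the population count of the mask, so
; these reductions should lower to a single vcpop.m whenever the mask fits in
; one vector register (the Zve32x nxv1i1 case below is a known exception, see
; the FIXME there).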
| |
| define i32 @test_v2i1(<2 x i1> %x) { |
| ; V-LABEL: test_v2i1: |
| ; V: # %bb.0: |
| ; V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma |
| ; V-NEXT: vcpop.m a0, v0 |
| ; V-NEXT: ret |
| ; |
| ; ZVE-LABEL: test_v2i1: |
| ; ZVE: # %bb.0: |
| ; ZVE-NEXT: vsetivli zero, 2, e8, mf4, ta, ma |
| ; ZVE-NEXT: vcpop.m a0, v0 |
| ; ZVE-NEXT: ret |
| %a = zext <2 x i1> %x to <2 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v4i1(<4 x i1> %x) { |
| ; CHECK-LABEL: test_v4i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <4 x i1> %x to <4 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v8i1(<8 x i1> %x) { |
| ; CHECK-LABEL: test_v8i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <8 x i1> %x to <8 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v16i1(<16 x i1> %x) { |
| ; CHECK-LABEL: test_v16i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <16 x i1> %x to <16 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v32i1(<32 x i1> %x) { |
| ; CHECK-LABEL: test_v32i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 32 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <32 x i1> %x to <32 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v64i1(<64 x i1> %x) { |
| ; CHECK-LABEL: test_v64i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 64 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <64 x i1> %x to <64 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_v128i1(<128 x i1> %x) { |
| ; CHECK-LABEL: test_v128i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a0, 128 |
| ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| %a = zext <128 x i1> %x to <128 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v128i32(<128 x i32> %a) |
| ret i32 %b |
| } |
| |
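; With the minimum VLEN of 128 implied by +v, a <256 x i1> mask spans two
; vector registers, so the reduction cannot be a single vcpop.m and is instead
; expanded through vmerge/vadd and a final vredsum.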
| define i32 @test_v256i1(<256 x i1> %x) { |
| ; CHECK-LABEL: test_v256i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 4 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb |
| ; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma |
| ; CHECK-NEXT: vmv1r.v v7, v8 |
| ; CHECK-NEXT: vmv1r.v v6, v0 |
| ; CHECK-NEXT: li a0, 32 |
| ; CHECK-NEXT: vslidedown.vi v5, v8, 8 |
| ; CHECK-NEXT: vslidedown.vi v4, v0, 8 |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v0, v5, 4 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 3 |
| ; CHECK-NEXT: add a1, sp, a1 |
| ; CHECK-NEXT: addi a1, a1, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v0, v4, 4 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: addi a1, sp, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v0, v7, 4 |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vi v0, v6, 4 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 3 |
| ; CHECK-NEXT: add a1, sp, a1 |
| ; CHECK-NEXT: addi a1, a1, 16 |
| ; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: addi a1, sp, 16 |
| ; CHECK-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma |
| ; CHECK-NEXT: vadd.vv v16, v16, v24 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 |
| ; CHECK-NEXT: vadd.vv v8, v24, v8 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vmv1r.v v0, v5 |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v4 |
| ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 |
| ; CHECK-NEXT: vadd.vv v8, v24, v8 |
| ; CHECK-NEXT: vmv1r.v v0, v7 |
| ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v6 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vadd.vv v16, v16, v24 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vv v24, v0, v24 |
| ; CHECK-NEXT: vadd.vv v8, v16, v8 |
| ; CHECK-NEXT: vadd.vv v8, v8, v24 |
| ; CHECK-NEXT: vmv.s.x v16, zero |
| ; CHECK-NEXT: vredsum.vs v8, v8, v16 |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 4 |
| ; CHECK-NEXT: add sp, sp, a1 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| %a = zext <256 x i1> %x to <256 x i32> |
| %b = call i32 @llvm.vector.reduce.add.v256i32(<256 x i32> %a) |
| ret i32 %b |
| } |
| |
; FIXME: Optimize this case with Zve32x. We would have to use e8, mf4 and set
; the VL to VLEN/64.
| define i32 @test_nxv1i1(<vscale x 1 x i1> %x) { |
| ; V-LABEL: test_nxv1i1: |
| ; V: # %bb.0: # %entry |
| ; V-NEXT: vsetvli a0, zero, e8, mf8, ta, ma |
| ; V-NEXT: vcpop.m a0, v0 |
| ; V-NEXT: ret |
| ; |
| ; ZVE-LABEL: test_nxv1i1: |
| ; ZVE: # %bb.0: # %entry |
| ; ZVE-NEXT: vsetvli a0, zero, e32, m1, ta, ma |
| ; ZVE-NEXT: vmv.v.i v8, 0 |
| ; ZVE-NEXT: csrr a0, vlenb |
| ; ZVE-NEXT: srli a0, a0, 3 |
| ; ZVE-NEXT: vsetvli zero, a0, e32, m1, ta, ma |
| ; ZVE-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; ZVE-NEXT: vsetivli zero, 1, e32, m1, ta, ma |
| ; ZVE-NEXT: vmv.s.x v9, zero |
| ; ZVE-NEXT: vsetvli zero, a0, e32, m1, ta, ma |
| ; ZVE-NEXT: vredsum.vs v9, v8, v9 |
| ; ZVE-NEXT: vmv.x.s a0, v9 |
| ; ZVE-NEXT: ret |
| entry: |
| %a = zext <vscale x 1 x i1> %x to <vscale x 1 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv2i1(<vscale x 2 x i1> %x) { |
| ; CHECK-LABEL: test_nxv2i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 2 x i1> %x to <vscale x 2 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv4i1(<vscale x 4 x i1> %x) { |
| ; CHECK-LABEL: test_nxv4i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 4 x i1> %x to <vscale x 4 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv8i1(<vscale x 8 x i1> %x) { |
| ; CHECK-LABEL: test_nxv8i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 8 x i1> %x to <vscale x 8 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv16i1(<vscale x 16 x i1> %x) { |
| ; CHECK-LABEL: test_nxv16i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 16 x i1> %x to <vscale x 16 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv32i1(<vscale x 32 x i1> %x) { |
| ; CHECK-LABEL: test_nxv32i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 32 x i1> %x to <vscale x 32 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv32i32(<vscale x 32 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv64i1(<vscale x 64 x i1> %x) { |
| ; CHECK-LABEL: test_nxv64i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 64 x i1> %x to <vscale x 64 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv64i32(<vscale x 64 x i32> %a) |
| ret i32 %b |
| } |
| |
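; A <vscale x 128 x i1> mask occupies two vector registers (vscale x 128 bits
; vs. the VLEN bits of a single register), so this case is likewise split and
; expanded rather than handled by vcpop.m.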
| define i32 @test_nxv128i1(<vscale x 128 x i1> %x) { |
| ; CHECK-LABEL: test_nxv128i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v7, v8 |
| ; CHECK-NEXT: vmv1r.v v6, v0 |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: addi a1, sp, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: srli a1, a0, 1 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v0, a1 |
| ; CHECK-NEXT: srli a0, a0, 2 |
| ; CHECK-NEXT: vmv8r.v v8, v16 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v0, a0 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v6, a0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v6, v7, a1 |
| ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v7, a0 |
| ; CHECK-NEXT: vslidedown.vx v5, v6, a0 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu |
| ; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v5 |
| ; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v8, v8, v16 |
| ; CHECK-NEXT: vmv1r.v v0, v6 |
| ; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v7 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v16, v16, v24 |
| ; CHECK-NEXT: vadd.vv v8, v16, v8 |
| ; CHECK-NEXT: vmv.s.x v16, zero |
| ; CHECK-NEXT: vredsum.vs v8, v8, v16 |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 3 |
| ; CHECK-NEXT: add sp, sp, a1 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 128 x i1> %x to <vscale x 128 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv128i32(<vscale x 128 x i32> %a) |
| ret i32 %b |
| } |
| |
| define i32 @test_nxv256i1(<vscale x 256 x i1> %x) { |
| ; CHECK-LABEL: test_nxv256i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: addi sp, sp, -16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 16 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 4 |
| ; CHECK-NEXT: mv a1, a0 |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add a0, a0, a1 |
| ; CHECK-NEXT: sub sp, sp, a0 |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmv1r.v v6, v10 |
| ; CHECK-NEXT: vmv1r.v v7, v9 |
| ; CHECK-NEXT: vmv1r.v v5, v8 |
| ; CHECK-NEXT: vmv1r.v v4, v0 |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: mv a2, a0 |
| ; CHECK-NEXT: slli a0, a0, 2 |
| ; CHECK-NEXT: add a0, a0, a2 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vmv1r.v v0, v5 |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 5 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: srli a0, a1, 1 |
| ; CHECK-NEXT: srli a1, a1, 2 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v3, v4, a0 |
| ; CHECK-NEXT: vslidedown.vx v2, v5, a0 |
| ; CHECK-NEXT: vmv.v.v v0, v3 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 |
| ; CHECK-NEXT: csrr a2, vlenb |
| ; CHECK-NEXT: slli a2, a2, 3 |
| ; CHECK-NEXT: mv a3, a2 |
| ; CHECK-NEXT: slli a2, a2, 1 |
| ; CHECK-NEXT: add a2, a2, a3 |
| ; CHECK-NEXT: add a2, sp, a2 |
| ; CHECK-NEXT: addi a2, a2, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vmv1r.v v0, v2 |
| ; CHECK-NEXT: vmv8r.v v8, v16 |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: csrr a2, vlenb |
| ; CHECK-NEXT: slli a2, a2, 4 |
| ; CHECK-NEXT: add a2, sp, a2 |
| ; CHECK-NEXT: addi a2, a2, 16 |
| ; CHECK-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v3, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v16, v8, 1, v0 |
| ; CHECK-NEXT: csrr a2, vlenb |
| ; CHECK-NEXT: slli a2, a2, 3 |
| ; CHECK-NEXT: add a2, sp, a2 |
| ; CHECK-NEXT: addi a2, a2, 16 |
| ; CHECK-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v2, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v24, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v4, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v16, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v5, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v6, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu |
| ; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t |
| ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v7, a1 |
| ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu |
| ; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v8, v16, v8 |
| ; CHECK-NEXT: addi a2, sp, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v5, v7, a0 |
| ; CHECK-NEXT: vslidedown.vx v4, v6, a0 |
| ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v0, v4, a1 |
| ; CHECK-NEXT: vslidedown.vx v3, v5, a1 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu |
| ; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v3 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v8, v8, v24 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill |
| ; CHECK-NEXT: vmv1r.v v0, v4 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 4 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v5 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: mv a1, a0 |
| ; CHECK-NEXT: slli a0, a0, 1 |
| ; CHECK-NEXT: add a0, a0, a1 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v16, v8, v16 |
| ; CHECK-NEXT: vmv1r.v v0, v6 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 5 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t |
| ; CHECK-NEXT: vmv1r.v v0, v7 |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: mv a1, a0 |
| ; CHECK-NEXT: slli a0, a0, 2 |
| ; CHECK-NEXT: add a0, a0, a1 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t |
| ; CHECK-NEXT: vadd.vv v24, v24, v8 |
| ; CHECK-NEXT: addi a0, sp, 16 |
| ; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: slli a0, a0, 3 |
| ; CHECK-NEXT: add a0, sp, a0 |
| ; CHECK-NEXT: addi a0, a0, 16 |
| ; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload |
| ; CHECK-NEXT: vadd.vv v8, v8, v0 |
| ; CHECK-NEXT: vadd.vv v16, v24, v16 |
| ; CHECK-NEXT: vadd.vv v8, v16, v8 |
| ; CHECK-NEXT: vmv.s.x v16, zero |
| ; CHECK-NEXT: vredsum.vs v8, v8, v16 |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: csrr a1, vlenb |
| ; CHECK-NEXT: slli a1, a1, 4 |
| ; CHECK-NEXT: mv a2, a1 |
| ; CHECK-NEXT: slli a1, a1, 1 |
| ; CHECK-NEXT: add a1, a1, a2 |
| ; CHECK-NEXT: add sp, sp, a1 |
| ; CHECK-NEXT: .cfi_def_cfa sp, 16 |
| ; CHECK-NEXT: addi sp, sp, 16 |
| ; CHECK-NEXT: .cfi_def_cfa_offset 0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 256 x i1> %x to <vscale x 256 x i32> |
| %b = call i32 @llvm.vector.reduce.add.nxv256i32(<vscale x 256 x i32> %a) |
| ret i32 %b |
| } |
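; Narrow result types: the i4 sum wraps modulo 16 and so does the low nibble
; of the population count, so vcpop.m still gives the correct result for
; <16 x i1>.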
| |
| define i4 @test_narrow_v16i1(<16 x i1> %x) { |
| ; CHECK-LABEL: test_narrow_v16i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; CHECK-NEXT: vcpop.m a0, v0 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <16 x i1> %x to <16 x i4> |
| %b = call i4 @llvm.vector.reduce.add.v16i4(<16 x i4> %a) |
| ret i4 %b |
| } |
| |
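; The scalable narrow case is expanded through vmerge/vadd and a vredsum
; rather than a single vcpop.m.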
| define i16 @test_narrow_nxv64i1(<vscale x 64 x i1> %x) { |
| ; CHECK-LABEL: test_narrow_nxv64i1: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: csrr a0, vlenb |
| ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma |
| ; CHECK-NEXT: vmv.v.i v16, 0 |
| ; CHECK-NEXT: srli a0, a0, 1 |
| ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma |
| ; CHECK-NEXT: vslidedown.vx v8, v0, a0 |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu |
| ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 |
| ; CHECK-NEXT: vmv1r.v v0, v8 |
| ; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t |
| ; CHECK-NEXT: vmv.s.x v8, zero |
| ; CHECK-NEXT: vredsum.vs v8, v16, v8 |
| ; CHECK-NEXT: vmv.x.s a0, v8 |
| ; CHECK-NEXT: ret |
| entry: |
| %a = zext <vscale x 64 x i1> %x to <vscale x 64 x i16> |
| %b = call i16 @llvm.vector.reduce.add.nxv64i16(<vscale x 64 x i16> %a) |
| ret i16 %b |
| } |