; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; Sum of both lanes of a <2 x i32> built from explicit extracts. The backend
; scalarizes via vmv.x.s + vslidedown; RV64 uses addw for the final add so the
; i32 result is sign-extended per the RV64 calling convention.
define i32 @reduce_sum_2xi32(<2 x i32> %v) {
; RV32-LABEL: reduce_sum_2xi32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_2xi32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: ret
  %e0 = extractelement <2 x i32> %v, i32 0
  %e1 = extractelement <2 x i32> %v, i32 1
  %add0 = add i32 %e0, %e1
  ret i32 %add0
}
; Sum of all four lanes of <4 x i32> via individual extracts; note the adds in
; the generated code are reassociated into a balanced tree (a0+a1, a2+a3, then
; combine) rather than the linear chain written in the IR.
define i32 @reduce_sum_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_sum_4xi32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmv.x.s a2, v9
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_4xi32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmv.x.s a2, v9
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: addw a0, a0, a2
; RV64-NEXT: ret
  %e0 = extractelement <4 x i32> %v, i32 0
  %e1 = extractelement <4 x i32> %v, i32 1
  %e2 = extractelement <4 x i32> %v, i32 2
  %e3 = extractelement <4 x i32> %v, i32 3
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  ret i32 %add2
}
; Sum of all eight lanes of <8 x i32>. Lanes 0-3 are extracted under an m1
; vtype; a second vsetivli switches to m2 before extracting lanes 4-7, since
; slides past the first register of the m2 group need the wider LMUL.
define i32 @reduce_sum_8xi32(<8 x i32> %v) {
; RV32-LABEL: reduce_sum_8xi32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a3, v10
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: vmv.x.s a4, v10
; RV32-NEXT: vslidedown.vi v10, v8, 5
; RV32-NEXT: vmv.x.s a5, v10
; RV32-NEXT: vslidedown.vi v10, v8, 6
; RV32-NEXT: vmv.x.s a6, v10
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vmv.x.s a7, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_8xi32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vmv.x.s a1, v10
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a3, v10
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 4
; RV64-NEXT: vmv.x.s a4, v10
; RV64-NEXT: vslidedown.vi v10, v8, 5
; RV64-NEXT: vmv.x.s a5, v10
; RV64-NEXT: vslidedown.vi v10, v8, 6
; RV64-NEXT: vmv.x.s a6, v10
; RV64-NEXT: vslidedown.vi v8, v8, 7
; RV64-NEXT: vmv.x.s a7, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: addw a0, a0, a7
; RV64-NEXT: ret
  %e0 = extractelement <8 x i32> %v, i32 0
  %e1 = extractelement <8 x i32> %v, i32 1
  %e2 = extractelement <8 x i32> %v, i32 2
  %e3 = extractelement <8 x i32> %v, i32 3
  %e4 = extractelement <8 x i32> %v, i32 4
  %e5 = extractelement <8 x i32> %v, i32 5
  %e6 = extractelement <8 x i32> %v, i32 6
  %e7 = extractelement <8 x i32> %v, i32 7
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  ret i32 %add6
}
; Sum of all sixteen lanes of <16 x i32>. Lanes 0-7 are extracted with
; vslidedown/vmv.x.s; the whole vector is then stored to a 64-byte-aligned
; stack slot (vse32.v) and lanes 8-15 are reloaded with scalar lw, which
; is why the function sets up a frame pointer and realigns sp with
; "andi sp, sp, -64".
define i32 @reduce_sum_16xi32(<16 x i32> %v) {
; RV32-LABEL: reduce_sum_16xi32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -128
; RV32-NEXT: .cfi_def_cfa_offset 128
; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 116(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s2, -12
; RV32-NEXT: addi s0, sp, 128
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v12, v8, 1
; RV32-NEXT: vmv.x.s a1, v12
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vmv.x.s a2, v12
; RV32-NEXT: vslidedown.vi v12, v8, 3
; RV32-NEXT: vmv.x.s a3, v12
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmv.x.s a4, v12
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vmv.x.s a5, v12
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vmv.x.s a6, v12
; RV32-NEXT: vslidedown.vi v12, v8, 7
; RV32-NEXT: vmv.x.s a7, v12
; RV32-NEXT: mv t0, sp
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (t0)
; RV32-NEXT: lw t0, 32(sp)
; RV32-NEXT: lw t1, 36(sp)
; RV32-NEXT: lw t2, 40(sp)
; RV32-NEXT: lw t3, 44(sp)
; RV32-NEXT: lw t4, 48(sp)
; RV32-NEXT: lw t5, 52(sp)
; RV32-NEXT: lw t6, 56(sp)
; RV32-NEXT: lw s2, 60(sp)
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a7, a7, t0
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: add t1, t1, t2
; RV32-NEXT: add t1, t1, t3
; RV32-NEXT: add a0, a0, t1
; RV32-NEXT: add t4, t4, t5
; RV32-NEXT: add t4, t4, t6
; RV32-NEXT: add t4, t4, s2
; RV32-NEXT: add a0, a0, t4
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 116(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 128
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -128
; RV64-NEXT: .cfi_def_cfa_offset 128
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s2, 104(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: .cfi_offset s2, -24
; RV64-NEXT: addi s0, sp, 128
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vslidedown.vi v12, v8, 1
; RV64-NEXT: vmv.x.s a1, v12
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vmv.x.s a2, v12
; RV64-NEXT: vslidedown.vi v12, v8, 3
; RV64-NEXT: vmv.x.s a3, v12
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmv.x.s a4, v12
; RV64-NEXT: vslidedown.vi v12, v8, 5
; RV64-NEXT: vmv.x.s a5, v12
; RV64-NEXT: vslidedown.vi v12, v8, 6
; RV64-NEXT: vmv.x.s a6, v12
; RV64-NEXT: vslidedown.vi v12, v8, 7
; RV64-NEXT: vmv.x.s a7, v12
; RV64-NEXT: mv t0, sp
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vse32.v v8, (t0)
; RV64-NEXT: lw t0, 32(sp)
; RV64-NEXT: lw t1, 36(sp)
; RV64-NEXT: lw t2, 40(sp)
; RV64-NEXT: lw t3, 44(sp)
; RV64-NEXT: lw t4, 48(sp)
; RV64-NEXT: lw t5, 52(sp)
; RV64-NEXT: lw t6, 56(sp)
; RV64-NEXT: lw s2, 60(sp)
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a7, a7, t0
; RV64-NEXT: add a0, a0, a7
; RV64-NEXT: add t1, t1, t2
; RV64-NEXT: add t1, t1, t3
; RV64-NEXT: add a0, a0, t1
; RV64-NEXT: add t4, t4, t5
; RV64-NEXT: add t4, t4, t6
; RV64-NEXT: add t4, t4, s2
; RV64-NEXT: addw a0, a0, t4
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s2, 104(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 128
; RV64-NEXT: ret
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %e7 = extractelement <16 x i32> %v, i32 7
  %e8 = extractelement <16 x i32> %v, i32 8
  %e9 = extractelement <16 x i32> %v, i32 9
  %e10 = extractelement <16 x i32> %v, i32 10
  %e11 = extractelement <16 x i32> %v, i32 11
  %e12 = extractelement <16 x i32> %v, i32 12
  %e13 = extractelement <16 x i32> %v, i32 13
  %e14 = extractelement <16 x i32> %v, i32 14
  %e15 = extractelement <16 x i32> %v, i32 15
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  %add7 = add i32 %add6, %e8
  %add8 = add i32 %add7, %e9
  %add9 = add i32 %add8, %e10
  %add10 = add i32 %add9, %e11
  %add11 = add i32 %add10, %e12
  %add12 = add i32 %add11, %e13
  %add13 = add i32 %add12, %e14
  %add14 = add i32 %add13, %e15
  ret i32 %add14
}
; Sum of only the first 2 lanes of a loaded <16 x i32>: exercises a partial
; ("prefix") reduction where most of the loaded vector is dead.
define i32 @reduce_sum_16xi32_prefix2(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix2:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix2:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %add0 = add i32 %e0, %e1
  ret i32 %add0
}
; Prefix reduction of the first 3 lanes of a loaded <16 x i32>.
define i32 @reduce_sum_16xi32_prefix3(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix3:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vmv.x.s a2, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix3:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vmv.x.s a2, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: addw a0, a0, a2
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  ret i32 %add1
}
; Prefix reduction of the first 4 lanes of a loaded <16 x i32>.
define i32 @reduce_sum_16xi32_prefix4(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix4:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmv.x.s a2, v9
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix4:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmv.x.s a2, v9
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: addw a0, a0, a2
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  ret i32 %add2
}
; Prefix reduction of the first 5 lanes of a loaded <16 x i32>; lane 4 is the
; first extraction to need an m2 vtype for its vslidedown.
define i32 @reduce_sum_16xi32_prefix5(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix5:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a3, v10
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vmv.x.s a4, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix5:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vmv.x.s a1, v10
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a3, v10
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 4
; RV64-NEXT: vmv.x.s a4, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: addw a0, a0, a4
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  ret i32 %add3
}
; Prefix reduction of the first 6 lanes of a loaded <16 x i32>.
define i32 @reduce_sum_16xi32_prefix6(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix6:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a3, v10
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: vmv.x.s a4, v10
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vmv.x.s a5, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix6:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vmv.x.s a1, v10
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a3, v10
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 4
; RV64-NEXT: vmv.x.s a4, v10
; RV64-NEXT: vslidedown.vi v8, v8, 5
; RV64-NEXT: vmv.x.s a5, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: addw a0, a0, a4
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  ret i32 %add4
}
; Prefix reduction of the first 7 lanes of a loaded <16 x i32>.
define i32 @reduce_sum_16xi32_prefix7(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix7:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a3, v10
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: vmv.x.s a4, v10
; RV32-NEXT: vslidedown.vi v10, v8, 5
; RV32-NEXT: vmv.x.s a5, v10
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vmv.x.s a6, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix7:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vmv.x.s a1, v10
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a3, v10
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 4
; RV64-NEXT: vmv.x.s a4, v10
; RV64-NEXT: vslidedown.vi v10, v8, 5
; RV64-NEXT: vmv.x.s a5, v10
; RV64-NEXT: vslidedown.vi v8, v8, 6
; RV64-NEXT: vmv.x.s a6, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: addw a0, a0, a4
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  ret i32 %add5
}
; Prefix reduction of the first 8 lanes of a loaded <16 x i32> — the largest
; prefix that is still handled entirely with vslidedown extraction (no stack
; round-trip; compare prefix9 below).
define i32 @reduce_sum_16xi32_prefix8(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a3, v10
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: vmv.x.s a4, v10
; RV32-NEXT: vslidedown.vi v10, v8, 5
; RV32-NEXT: vmv.x.s a5, v10
; RV32-NEXT: vslidedown.vi v10, v8, 6
; RV32-NEXT: vmv.x.s a6, v10
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vmv.x.s a7, v8
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vmv.x.s a1, v10
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a3, v10
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 4
; RV64-NEXT: vmv.x.s a4, v10
; RV64-NEXT: vslidedown.vi v10, v8, 5
; RV64-NEXT: vmv.x.s a5, v10
; RV64-NEXT: vslidedown.vi v10, v8, 6
; RV64-NEXT: vmv.x.s a6, v10
; RV64-NEXT: vslidedown.vi v8, v8, 7
; RV64-NEXT: vmv.x.s a7, v8
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: addw a0, a0, a7
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %e7 = extractelement <16 x i32> %v, i32 7
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  ret i32 %add6
}
; Prefix reduction of the first 9 lanes: lanes 0-7 come from vslidedown
; extraction, while lane 8 forces the vector to be stored to an aligned
; stack slot (vse32.v) and reloaded with a single lw from 32(sp) — hence
; the frame setup with "andi sp, sp, -64".
define i32 @reduce_sum_16xi32_prefix9(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix9:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -128
; RV32-NEXT: .cfi_def_cfa_offset 128
; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 128
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 1
; RV32-NEXT: vmv.x.s a1, v12
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vmv.x.s a2, v12
; RV32-NEXT: vslidedown.vi v12, v8, 3
; RV32-NEXT: vmv.x.s a3, v12
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmv.x.s a4, v12
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vmv.x.s a5, v12
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vmv.x.s a6, v12
; RV32-NEXT: vslidedown.vi v12, v8, 7
; RV32-NEXT: vmv.x.s a7, v12
; RV32-NEXT: mv t0, sp
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (t0)
; RV32-NEXT: lw t0, 32(sp)
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a7, a7, t0
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 128
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix9:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -128
; RV64-NEXT: .cfi_def_cfa_offset 128
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 128
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 1
; RV64-NEXT: vmv.x.s a1, v12
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vmv.x.s a2, v12
; RV64-NEXT: vslidedown.vi v12, v8, 3
; RV64-NEXT: vmv.x.s a3, v12
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmv.x.s a4, v12
; RV64-NEXT: vslidedown.vi v12, v8, 5
; RV64-NEXT: vmv.x.s a5, v12
; RV64-NEXT: vslidedown.vi v12, v8, 6
; RV64-NEXT: vmv.x.s a6, v12
; RV64-NEXT: vslidedown.vi v12, v8, 7
; RV64-NEXT: vmv.x.s a7, v12
; RV64-NEXT: mv t0, sp
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vse32.v v8, (t0)
; RV64-NEXT: lw t0, 32(sp)
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a7, a7, t0
; RV64-NEXT: addw a0, a0, a7
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 128
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %e7 = extractelement <16 x i32> %v, i32 7
  %e8 = extractelement <16 x i32> %v, i32 8
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  %add7 = add i32 %add6, %e8
  ret i32 %add7
}
; Prefix reduction of the first 13 lanes: lanes 0-7 via vslidedown, lanes
; 8-12 reloaded from the aligned stack slot (lw from 32(sp)..48(sp)) after
; the vector is spilled with vse32.v.
define i32 @reduce_sum_16xi32_prefix13(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix13:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -128
; RV32-NEXT: .cfi_def_cfa_offset 128
; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 128
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 1
; RV32-NEXT: vmv.x.s a1, v12
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vmv.x.s a2, v12
; RV32-NEXT: vslidedown.vi v12, v8, 3
; RV32-NEXT: vmv.x.s a3, v12
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmv.x.s a4, v12
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vmv.x.s a5, v12
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vmv.x.s a6, v12
; RV32-NEXT: vslidedown.vi v12, v8, 7
; RV32-NEXT: vmv.x.s a7, v12
; RV32-NEXT: mv t0, sp
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (t0)
; RV32-NEXT: lw t0, 32(sp)
; RV32-NEXT: lw t1, 36(sp)
; RV32-NEXT: lw t2, 40(sp)
; RV32-NEXT: lw t3, 44(sp)
; RV32-NEXT: lw t4, 48(sp)
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a7, a7, t0
; RV32-NEXT: add a7, a7, t1
; RV32-NEXT: add a7, a7, t2
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: add t3, t3, t4
; RV32-NEXT: add a0, a0, t3
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 128
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix13:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -128
; RV64-NEXT: .cfi_def_cfa_offset 128
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 128
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 1
; RV64-NEXT: vmv.x.s a1, v12
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vmv.x.s a2, v12
; RV64-NEXT: vslidedown.vi v12, v8, 3
; RV64-NEXT: vmv.x.s a3, v12
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmv.x.s a4, v12
; RV64-NEXT: vslidedown.vi v12, v8, 5
; RV64-NEXT: vmv.x.s a5, v12
; RV64-NEXT: vslidedown.vi v12, v8, 6
; RV64-NEXT: vmv.x.s a6, v12
; RV64-NEXT: vslidedown.vi v12, v8, 7
; RV64-NEXT: vmv.x.s a7, v12
; RV64-NEXT: mv t0, sp
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vse32.v v8, (t0)
; RV64-NEXT: lw t0, 32(sp)
; RV64-NEXT: lw t1, 36(sp)
; RV64-NEXT: lw t2, 40(sp)
; RV64-NEXT: lw t3, 44(sp)
; RV64-NEXT: lw t4, 48(sp)
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a7, a7, t0
; RV64-NEXT: add a7, a7, t1
; RV64-NEXT: add a7, a7, t2
; RV64-NEXT: add a0, a0, a7
; RV64-NEXT: add t3, t3, t4
; RV64-NEXT: addw a0, a0, t3
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 128
; RV64-NEXT: ret
  %v = load <16 x i32>, ptr %p, align 256
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %e7 = extractelement <16 x i32> %v, i32 7
  %e8 = extractelement <16 x i32> %v, i32 8
  %e9 = extractelement <16 x i32> %v, i32 9
  %e10 = extractelement <16 x i32> %v, i32 10
  %e11 = extractelement <16 x i32> %v, i32 11
  %e12 = extractelement <16 x i32> %v, i32 12
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  %add7 = add i32 %add6, %e8
  %add8 = add i32 %add7, %e9
  %add9 = add i32 %add8, %e10
  %add10 = add i32 %add9, %e11
  %add11 = add i32 %add10, %e12
  ret i32 %add11
}
define i32 @reduce_sum_16xi32_prefix14(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix14:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -128
; RV32-NEXT: .cfi_def_cfa_offset 128
; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 128
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 1
; RV32-NEXT: vmv.x.s a1, v12
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vmv.x.s a2, v12
; RV32-NEXT: vslidedown.vi v12, v8, 3
; RV32-NEXT: vmv.x.s a3, v12
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmv.x.s a4, v12
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vmv.x.s a5, v12
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vmv.x.s a6, v12
; RV32-NEXT: vslidedown.vi v12, v8, 7
; RV32-NEXT: vmv.x.s a7, v12
; RV32-NEXT: mv t0, sp
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (t0)
; RV32-NEXT: lw t0, 32(sp)
; RV32-NEXT: lw t1, 36(sp)
; RV32-NEXT: lw t2, 40(sp)
; RV32-NEXT: lw t3, 44(sp)
; RV32-NEXT: lw t4, 48(sp)
; RV32-NEXT: lw t5, 52(sp)
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a2, a2, a3
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: add a4, a4, a6
; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a7, a7, t0
; RV32-NEXT: add a7, a7, t1
; RV32-NEXT: add a7, a7, t2
; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: add t3, t3, t4
; RV32-NEXT: add t3, t3, t5
; RV32-NEXT: add a0, a0, t3
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 128
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix14:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -128
; RV64-NEXT: .cfi_def_cfa_offset 128
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 128
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 1
; RV64-NEXT: vmv.x.s a1, v12
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vmv.x.s a2, v12
; RV64-NEXT: vslidedown.vi v12, v8, 3
; RV64-NEXT: vmv.x.s a3, v12
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmv.x.s a4, v12
; RV64-NEXT: vslidedown.vi v12, v8, 5
; RV64-NEXT: vmv.x.s a5, v12
; RV64-NEXT: vslidedown.vi v12, v8, 6
; RV64-NEXT: vmv.x.s a6, v12
; RV64-NEXT: vslidedown.vi v12, v8, 7
; RV64-NEXT: vmv.x.s a7, v12
; RV64-NEXT: mv t0, sp
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vse32.v v8, (t0)
; RV64-NEXT: lw t0, 32(sp)
; RV64-NEXT: lw t1, 36(sp)
; RV64-NEXT: lw t2, 40(sp)
; RV64-NEXT: lw t3, 44(sp)
; RV64-NEXT: lw t4, 48(sp)
; RV64-NEXT: lw t5, 52(sp)
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: add a4, a4, a6
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a7, a7, t0
; RV64-NEXT: add a7, a7, t1
; RV64-NEXT: add a7, a7, t2
; RV64-NEXT: add a0, a0, a7
; RV64-NEXT: add t3, t3, t4
; RV64-NEXT: add t3, t3, t5
; RV64-NEXT: addw a0, a0, t3
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 128
; RV64-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
%e0 = extractelement <16 x i32> %v, i32 0
%e1 = extractelement <16 x i32> %v, i32 1
%e2 = extractelement <16 x i32> %v, i32 2
%e3 = extractelement <16 x i32> %v, i32 3
%e4 = extractelement <16 x i32> %v, i32 4
%e5 = extractelement <16 x i32> %v, i32 5
%e6 = extractelement <16 x i32> %v, i32 6
%e7 = extractelement <16 x i32> %v, i32 7
%e8 = extractelement <16 x i32> %v, i32 8
%e9 = extractelement <16 x i32> %v, i32 9
%e10 = extractelement <16 x i32> %v, i32 10
%e11 = extractelement <16 x i32> %v, i32 11
%e12 = extractelement <16 x i32> %v, i32 12
%e13 = extractelement <16 x i32> %v, i32 13
%add0 = add i32 %e0, %e1
%add1 = add i32 %add0, %e2
%add2 = add i32 %add1, %e3
%add3 = add i32 %add2, %e4
%add4 = add i32 %add3, %e5
%add5 = add i32 %add4, %e6
%add6 = add i32 %add5, %e7
%add7 = add i32 %add6, %e8
%add8 = add i32 %add7, %e9
%add9 = add i32 %add8, %e10
%add10 = add i32 %add9, %e11
%add11 = add i32 %add10, %e12
%add12 = add i32 %add11, %e13
ret i32 %add12
}
; Sums the first 15 lanes of a <16 x i32> loaded from %p using an explicit
; extractelement + scalar-add chain (no llvm.vector.reduce.add intrinsic),
; exercising how the backend lowers a near-full-width partial reduction.
; Per the generated checks below, lanes 0-7 are extracted in-register with
; vslidedown.vi / vmv.x.s, then the whole vector is spilled with vse32.v to
; a stack slot aligned to 64 bytes (andi sp, sp, -64) and lanes 8-14 are
; reloaded with scalar lw from offsets 32..56.
; NOTE(review): the switch to a stack spill at lane 8 presumably reflects
; the cost of high-lane extracts under LMUL=4 -- confirm against the
; backend's extract cost model.
; The RV64 output differs from RV32 only in the 8-byte ra/s0 spill slots
; and the final addw, which sign-extends the i32 result per the RV64 ABI.
define i32 @reduce_sum_16xi32_prefix15(ptr %p) {
; RV32-LABEL: reduce_sum_16xi32_prefix15:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -128
; RV32-NEXT:    .cfi_def_cfa_offset 128
; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 128
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 1
; RV32-NEXT:    vmv.x.s a1, v12
; RV32-NEXT:    vslidedown.vi v12, v8, 2
; RV32-NEXT:    vmv.x.s a2, v12
; RV32-NEXT:    vslidedown.vi v12, v8, 3
; RV32-NEXT:    vmv.x.s a3, v12
; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 4
; RV32-NEXT:    vmv.x.s a4, v12
; RV32-NEXT:    vslidedown.vi v12, v8, 5
; RV32-NEXT:    vmv.x.s a5, v12
; RV32-NEXT:    vslidedown.vi v12, v8, 6
; RV32-NEXT:    vmv.x.s a6, v12
; RV32-NEXT:    vslidedown.vi v12, v8, 7
; RV32-NEXT:    vmv.x.s a7, v12
; RV32-NEXT:    mv t0, sp
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vse32.v v8, (t0)
; RV32-NEXT:    lw t0, 32(sp)
; RV32-NEXT:    lw t1, 36(sp)
; RV32-NEXT:    lw t2, 40(sp)
; RV32-NEXT:    lw t3, 44(sp)
; RV32-NEXT:    lw t4, 48(sp)
; RV32-NEXT:    lw t5, 52(sp)
; RV32-NEXT:    lw t6, 56(sp)
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    add a2, a2, a3
; RV32-NEXT:    add a0, a0, a2
; RV32-NEXT:    add a4, a4, a5
; RV32-NEXT:    add a4, a4, a6
; RV32-NEXT:    add a0, a0, a4
; RV32-NEXT:    add a7, a7, t0
; RV32-NEXT:    add a7, a7, t1
; RV32-NEXT:    add a7, a7, t2
; RV32-NEXT:    add a0, a0, a7
; RV32-NEXT:    add t3, t3, t4
; RV32-NEXT:    add t3, t3, t5
; RV32-NEXT:    add t3, t3, t6
; RV32-NEXT:    add a0, a0, t3
; RV32-NEXT:    addi sp, s0, -128
; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 128
; RV32-NEXT:    ret
;
; RV64-LABEL: reduce_sum_16xi32_prefix15:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -128
; RV64-NEXT:    .cfi_def_cfa_offset 128
; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 128
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT:    vslidedown.vi v12, v8, 1
; RV64-NEXT:    vmv.x.s a1, v12
; RV64-NEXT:    vslidedown.vi v12, v8, 2
; RV64-NEXT:    vmv.x.s a2, v12
; RV64-NEXT:    vslidedown.vi v12, v8, 3
; RV64-NEXT:    vmv.x.s a3, v12
; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT:    vslidedown.vi v12, v8, 4
; RV64-NEXT:    vmv.x.s a4, v12
; RV64-NEXT:    vslidedown.vi v12, v8, 5
; RV64-NEXT:    vmv.x.s a5, v12
; RV64-NEXT:    vslidedown.vi v12, v8, 6
; RV64-NEXT:    vmv.x.s a6, v12
; RV64-NEXT:    vslidedown.vi v12, v8, 7
; RV64-NEXT:    vmv.x.s a7, v12
; RV64-NEXT:    mv t0, sp
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vse32.v v8, (t0)
; RV64-NEXT:    lw t0, 32(sp)
; RV64-NEXT:    lw t1, 36(sp)
; RV64-NEXT:    lw t2, 40(sp)
; RV64-NEXT:    lw t3, 44(sp)
; RV64-NEXT:    lw t4, 48(sp)
; RV64-NEXT:    lw t5, 52(sp)
; RV64-NEXT:    lw t6, 56(sp)
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    add a2, a2, a3
; RV64-NEXT:    add a0, a0, a2
; RV64-NEXT:    add a4, a4, a5
; RV64-NEXT:    add a4, a4, a6
; RV64-NEXT:    add a0, a0, a4
; RV64-NEXT:    add a7, a7, t0
; RV64-NEXT:    add a7, a7, t1
; RV64-NEXT:    add a7, a7, t2
; RV64-NEXT:    add a0, a0, a7
; RV64-NEXT:    add t3, t3, t4
; RV64-NEXT:    add t3, t3, t5
; RV64-NEXT:    add t3, t3, t6
; RV64-NEXT:    addw a0, a0, t3
; RV64-NEXT:    addi sp, s0, -128
; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 128
; RV64-NEXT:    ret
  ; Explicitly over-aligned load (natural alignment of <16 x i32> is 64 bytes).
  %v = load <16 x i32>, ptr %p, align 256
  ; Extract lanes 0-14; lane 15 is deliberately omitted (prefix of 15 lanes).
  %e0 = extractelement <16 x i32> %v, i32 0
  %e1 = extractelement <16 x i32> %v, i32 1
  %e2 = extractelement <16 x i32> %v, i32 2
  %e3 = extractelement <16 x i32> %v, i32 3
  %e4 = extractelement <16 x i32> %v, i32 4
  %e5 = extractelement <16 x i32> %v, i32 5
  %e6 = extractelement <16 x i32> %v, i32 6
  %e7 = extractelement <16 x i32> %v, i32 7
  %e8 = extractelement <16 x i32> %v, i32 8
  %e9 = extractelement <16 x i32> %v, i32 9
  %e10 = extractelement <16 x i32> %v, i32 10
  %e11 = extractelement <16 x i32> %v, i32 11
  %e12 = extractelement <16 x i32> %v, i32 12
  %e13 = extractelement <16 x i32> %v, i32 13
  %e14 = extractelement <16 x i32> %v, i32 14
  ; Strictly left-associated scalar add chain over the 15 extracted lanes.
  %add0 = add i32 %e0, %e1
  %add1 = add i32 %add0, %e2
  %add2 = add i32 %add1, %e3
  %add3 = add i32 %add2, %e4
  %add4 = add i32 %add3, %e5
  %add5 = add i32 %add4, %e6
  %add6 = add i32 %add5, %e7
  %add7 = add i32 %add6, %e8
  %add8 = add i32 %add7, %e9
  %add9 = add i32 %add8, %e10
  %add10 = add i32 %add9, %e11
  %add11 = add i32 %add10, %e12
  %add12 = add i32 %add11, %e13
  %add13 = add i32 %add12, %e14
  ret i32 %add13
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}