; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+xtheadmemidx,+xtheadmempair -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+xtheadmemidx,+xtheadmempair -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix RV64
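
; Check that fixed-length vector codegen composes with the T-Head extensions:
; the XTHeadMemPair paired loads (th.lwd on RV32, th.ldd on RV64) and the
; XTHeadMemIdx post-increment store (th.swia).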
define void @test(ptr %ref_array, ptr %sad_array) {
; RV32-LABEL: test:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    th.lwd a2, a3, (a0), 0, 3
; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT:    vle8.v v8, (a2)
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vzext.vf4 v12, v8
; RV32-NEXT:    vmv.s.x v8, zero
; RV32-NEXT:    vredsum.vs v10, v12, v8
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    th.swia a0, (a1), 4, 0
; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT:    vle8.v v10, (a3)
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vslideup.vi v10, v9, 4
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vzext.vf4 v12, v10
; RV32-NEXT:    vredsum.vs v8, v12, v8
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vse32.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: test:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    th.ldd a2, a3, (a0), 0, 4
; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT:    vle8.v v8, (a2)
; RV64-NEXT:    vmv.v.i v9, 0
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vzext.vf4 v12, v8
; RV64-NEXT:    vmv.s.x v8, zero
; RV64-NEXT:    vredsum.vs v10, v12, v8
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    th.swia a0, (a1), 4, 0
; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT:    vle8.v v10, (a3)
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT:    vslideup.vi v10, v9, 4
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vzext.vf4 v12, v10
; RV64-NEXT:    vredsum.vs v8, v12, v8
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT:    vse32.v v8, (a1)
; RV64-NEXT:    ret
entry:
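  ; Load ref_array[0], zero-extend its first four bytes to i32, and store
  ; their reduce-add sum to sad_array[0].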
  %0 = load ptr, ptr %ref_array, align 8
  %1 = load <4 x i8>, ptr %0, align 1
  %2 = shufflevector <4 x i8> %1, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %3 = zext <16 x i8> %2 to <16 x i32>
  %4 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
  store i32 %4, ptr %sad_array, align 4, !tbaa !0
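  ; Likewise for ref_array[1]: the four bytes are first padded out to eight
  ; lanes with zeros, then widened and summed into sad_array[1].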
  %arrayidx.1 = getelementptr ptr, ptr %ref_array, i64 1
  %5 = load ptr, ptr %arrayidx.1, align 8, !tbaa !4
  %6 = load <4 x i8>, ptr %5, align 1
  %7 = shufflevector <4 x i8> %6, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %8 = zext <16 x i8> %7 to <16 x i32>
  %9 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %8)
  %arrayidx2.1 = getelementptr i32, ptr %sad_array, i64 1
  store i32 %9, ptr %arrayidx2.1, align 4
  ret void
}

declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)

!0 = !{!1, !1, i64 0}
!1 = !{!"int", !2, i64 0}
!2 = !{!"omnipotent char", !3, i64 0}
!3 = !{!"Simple C/C++ TBAA"}
!4 = !{!5, !5, i64 0}
!5 = !{!"any pointer", !2, i64 0}