; blob: c24fbcdb9e8417ea09601b04841318e4aae21d93 [file] [log] [blame] [edit]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -o - %s | FileCheck %s
; Test for DS prefetch with flush points: preheader has single ds_load_b64 (2xf32).
; Loop has DS loads where some are used in same iteration, others are prefetches.
; Expected: s_wait_dscnt 0 in preheader (preheader flush optimization)
define amdgpu_kernel void @ds_prefetch_flushed(ptr addrspace(3) %lds, ptr addrspace(1) %out, i32 %n) {
; CHECK-LABEL: ds_prefetch_flushed:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s1, s[4:5], 0x0 nv
; CHECK-NEXT: s_load_b32 s0, s[4:5], 0x10 nv
; CHECK-NEXT: v_and_b32_e32 v10, 0x3ff, v0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
; CHECK-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v8, v4
; CHECK-NEXT: v_mov_b32_e32 v9, v4
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: v_lshl_add_u32 v11, v10, 6, s1
; CHECK-NEXT: v_lshl_add_u32 v12, v10, 5, s1
; CHECK-NEXT: v_lshl_add_u32 v13, v10, 8, s1
; CHECK-NEXT: v_lshl_add_u32 v14, v10, 7, s1
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: ds_load_b64 v[0:1], v11 offset:4
; CHECK-NEXT: ds_load_b64 v[2:3], v12
; CHECK-NEXT: s_wait_dscnt 0x0
; CHECK-NEXT: .LBB0_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_barrier_signal -1
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; CHECK-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[2:3]
; CHECK-NEXT: s_add_co_i32 s1, s1, 1
; CHECK-NEXT: s_cmp_lt_i32 s1, s0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[0:1]
; CHECK-NEXT: s_wait_dscnt 0x1
; CHECK-NEXT: v_pk_add_f32 v[6:7], v[8:9], v[6:7]
; CHECK-NEXT: s_wait_dscnt 0x0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_pk_add_f32 v[8:9], v[6:7], v[4:5]
; CHECK-NEXT: s_barrier_wait -1
; CHECK-NEXT: ds_load_b64 v[16:17], v12
; CHECK-NEXT: ds_load_b64 v[18:19], v11
; CHECK-NEXT: ds_load_b64 v[6:7], v14
; CHECK-NEXT: ds_load_b64 v[4:5], v13
; CHECK-NEXT: v_dual_add_nc_u32 v12, 8, v12 :: v_dual_add_nc_u32 v11, 8, v11
; CHECK-NEXT: v_dual_add_nc_u32 v13, 8, v13 :: v_dual_add_nc_u32 v14, 8, v14
; CHECK-NEXT: s_wait_dscnt 0x2
; CHECK-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[18:19]
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[16:17]
; CHECK-NEXT: s_cbranch_scc1 .LBB0_1
; CHECK-NEXT: ; %bb.2: ; %exit
; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 nv
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: global_store_b64 v10, v[8:9], s[0:1] scale_offset
; CHECK-NEXT: s_endpgm
entry:
; Four per-lane base offsets at increasing strides (tid << 2/3/4/5).
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%base1 = shl i32 %tid, 2
%base2 = shl i32 %tid, 3
%base3 = shl i32 %tid, 4
%base4 = shl i32 %tid, 5
; Preheader: single 64-bit DS load each (ds_load_b64 / 2 x float)
; Both preheader loads are loop-invariant; their results (%init.v1/%init.v2)
; are consumed every iteration, so the waitcnt pass can flush with a single
; s_wait_dscnt 0x0 before entering the loop (the behavior under test).
%ptr.pre2 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %base2, i32 1
%init.v2 = load <2 x float>, ptr addrspace(3) %ptr.pre2, align 8
%ptr.pre1 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %base1
%init.v1 = load <2 x float>, ptr addrspace(3) %ptr.pre1, align 8
br label %loop
loop:
; %prefetch1/%prefetch2 carry the previous iteration's %load3/%load4 across
; the backedge: those loads are issued here but only consumed one iteration
; later, making them software prefetches.
%i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
%acc = phi <2 x float> [ zeroinitializer, %entry ], [ %acc.next, %loop ]
%prefetch1 = phi <2 x float> [ zeroinitializer, %entry ], [ %load3, %loop ]
%prefetch2 = phi <2 x float> [ zeroinitializer, %entry ], [ %load4, %loop ]
; Accumulate the invariant preheader values and last iteration's prefetches.
%use.pre1 = fadd <2 x float> %acc, %init.v1
%use.pre2 = fadd <2 x float> %use.pre1, %init.v2
%use.pf1 = fadd <2 x float> %use.pre2, %prefetch1
%use.pf2 = fadd <2 x float> %use.pf1, %prefetch2
; Barrier separates consumption of old data from issuing this iteration's
; DS loads.
call void @llvm.amdgcn.s.barrier()
; %load1/%load2 are consumed in this iteration (%sum); %load3/%load4 are
; only read next iteration via the prefetch phis above.
%off1 = add i32 %base1, %i
%ptr1 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %off1
%load1 = load <2 x float>, ptr addrspace(3) %ptr1, align 8
%off2 = add i32 %base2, %i
%ptr2 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %off2
%load2 = load <2 x float>, ptr addrspace(3) %ptr2, align 8
%off3 = add i32 %base3, %i
%ptr3 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %off3
%load3 = load <2 x float>, ptr addrspace(3) %ptr3, align 8
%off4 = add i32 %base4, %i
%ptr4 = getelementptr <2 x float>, ptr addrspace(3) %lds, i32 %off4
%load4 = load <2 x float>, ptr addrspace(3) %ptr4, align 8
%sum = fadd <2 x float> %load1, %load2
%acc.next = fadd <2 x float> %use.pf2, %sum
%i.next = add i32 %i, 1
%cond = icmp slt i32 %i.next, %n
br i1 %cond, label %loop, label %exit, !llvm.loop !0
exit:
; Store the final accumulator, one <2 x float> per lane.
%out.ptr = getelementptr <2 x float>, ptr addrspace(1) %out, i32 %tid
store <2 x float> %acc.next, ptr addrspace(1) %out.ptr, align 8
ret void
}
; Loop metadata: disable unrolling so the loop body keeps exactly one copy of
; each DS load (the CHECK lines depend on that shape). Per the LLVM LangRef,
; an llvm.loop metadata node must be 'distinct' and its first operand must be
; a self-reference; without it the hint is malformed and may be dropped.
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.unroll.disable"}
declare i32 @llvm.amdgcn.workitem.id.x()
declare void @llvm.amdgcn.s.barrier()