; blob: 0b512b00c3fbca6a72afb3825d02a4f9efe178cc
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck --check-prefix=OPT %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-barrier-signal-wait-latency=0 < %s | FileCheck --check-prefix=NOOPT %s
; Tests for scheduling independent work between s_barrier_signal and s_barrier_wait
; for latency hiding.
; Independent work should be scheduled between signal/wait
define amdgpu_kernel void @test_barrier_independent_valu(ptr addrspace(1) %out, i32 %size) #0 {
; OPT-LABEL: test_barrier_independent_valu:
; OPT: ; %bb.0: ; %entry
; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; OPT-NEXT: s_wait_kmcnt 0x0
; OPT-NEXT: v_xad_u32 v0, v1, -1, s2
; OPT-NEXT: global_store_b32 v2, v1, s[0:1]
; OPT-NEXT: s_barrier_signal -1
; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
; OPT-NEXT: s_barrier_wait -1
; OPT-NEXT: global_load_b32 v0, v[0:1], off
; OPT-NEXT: s_wait_loadcnt 0x0
; OPT-NEXT: global_store_b32 v2, v0, s[0:1]
; OPT-NEXT: s_endpgm
;
; NOOPT-LABEL: test_barrier_independent_valu:
; NOOPT: ; %bb.0: ; %entry
; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2
; NOOPT-NEXT: s_wait_kmcnt 0x0
; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2
; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
; NOOPT-NEXT: s_barrier_signal -1
; NOOPT-NEXT: s_barrier_wait -1
; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
; NOOPT-NEXT: global_load_b32 v0, v[0:1], off
; NOOPT-NEXT: s_wait_loadcnt 0x0
; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1]
; NOOPT-NEXT: s_endpgm
entry:
; Each lane writes its own id to out[tid], then signals/waits on barrier -1.
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
store i32 %tid, ptr addrspace(1) %addr
call void @llvm.amdgcn.s.barrier.signal(i32 -1)
call void @llvm.amdgcn.s.barrier.wait(i16 -1)
; Post-barrier: read the mirrored element out[size-1-tid] (written by another
; lane before the barrier) and store it back to this lane's slot. The index
; arithmetic below is independent of the barrier, so with the latency-hiding
; scheduling (OPT) it can be hoisted between s_barrier_signal and
; s_barrier_wait; with -amdgpu-barrier-signal-wait-latency=0 (NOOPT) the
; signal/wait pair stays adjacent.
%idx_base = sub i32 %size, 1
%idx = sub i32 %idx_base, %tid
%read_addr = getelementptr i32, ptr addrspace(1) %out, i32 %idx
%val = load i32, ptr addrspace(1) %read_addr
store i32 %val, ptr addrspace(1) %addr
ret void
}
; No independent work - signal/wait should stay adjacent
define amdgpu_kernel void @test_barrier_no_independent_work(ptr addrspace(3) %lds) #0 {
; OPT-LABEL: test_barrier_no_independent_work:
; OPT: ; %bb.0: ; %entry
; OPT-NEXT: s_load_b32 s0, s[4:5], 0x24
; OPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; OPT-NEXT: s_wait_kmcnt 0x0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; OPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0
; OPT-NEXT: ds_store_b32 v1, v0
; OPT-NEXT: s_barrier_signal -1
; OPT-NEXT: s_barrier_wait -1
; OPT-NEXT: ds_load_b32 v0, v1
; OPT-NEXT: s_wait_dscnt 0x0
; OPT-NEXT: ds_store_b32 v1, v0 offset:4
; OPT-NEXT: s_endpgm
;
; NOOPT-LABEL: test_barrier_no_independent_work:
; NOOPT: ; %bb.0: ; %entry
; NOOPT-NEXT: s_load_b32 s0, s[4:5], 0x24
; NOOPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; NOOPT-NEXT: s_wait_kmcnt 0x0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; NOOPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0
; NOOPT-NEXT: ds_store_b32 v1, v0
; NOOPT-NEXT: s_barrier_signal -1
; NOOPT-NEXT: s_barrier_wait -1
; NOOPT-NEXT: ds_load_b32 v0, v1
; NOOPT-NEXT: s_wait_dscnt 0x0
; NOOPT-NEXT: ds_store_b32 v1, v0 offset:4
; NOOPT-NEXT: s_endpgm
entry:
; Write tid into LDS at lds[tid], then barrier.
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%addr = getelementptr i32, ptr addrspace(3) %lds, i32 %tid
store i32 %tid, ptr addrspace(3) %addr
call void @llvm.amdgcn.s.barrier.signal(i32 -1)
call void @llvm.amdgcn.s.barrier.wait(i16 -1)
; Everything after the barrier depends on the pre-barrier store's address
; computation (%addr is reused), so there is no independent work for the
; scheduler to move between signal and wait: OPT and NOOPT output match.
%val = load i32, ptr addrspace(3) %addr
%next = add i32 %tid, 1
%next_addr = getelementptr i32, ptr addrspace(3) %lds, i32 %next
store i32 %val, ptr addrspace(3) %next_addr
ret void
}
; Multiple barriers
define amdgpu_kernel void @test_barrier_multiple(ptr addrspace(1) %out, i32 %size) #0 {
; OPT-LABEL: test_barrier_multiple:
; OPT: ; %bb.0: ; %entry
; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; OPT-NEXT: s_wait_kmcnt 0x0
; OPT-NEXT: v_xad_u32 v0, v1, -1, s2
; OPT-NEXT: global_store_b32 v2, v1, s[0:1]
; OPT-NEXT: s_barrier_signal -1
; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
; OPT-NEXT: s_barrier_wait -1
; OPT-NEXT: global_load_b32 v3, v[0:1], off
; OPT-NEXT: s_wait_loadcnt 0x0
; OPT-NEXT: global_store_b32 v2, v3, s[0:1]
; OPT-NEXT: s_barrier_signal -1
; OPT-NEXT: s_barrier_wait -1
; OPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4
; OPT-NEXT: s_wait_loadcnt 0x0
; OPT-NEXT: global_store_b32 v2, v0, s[0:1]
; OPT-NEXT: s_endpgm
;
; NOOPT-LABEL: test_barrier_multiple:
; NOOPT: ; %bb.0: ; %entry
; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2
; NOOPT-NEXT: s_wait_kmcnt 0x0
; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2
; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
; NOOPT-NEXT: s_barrier_signal -1
; NOOPT-NEXT: s_barrier_wait -1
; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
; NOOPT-NEXT: global_load_b32 v2, v[0:1], off
; NOOPT-NEXT: s_wait_loadcnt 0x0
; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
; NOOPT-NEXT: s_barrier_signal -1
; NOOPT-NEXT: s_barrier_wait -1
; NOOPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4
; NOOPT-NEXT: s_wait_loadcnt 0x0
; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1]
; NOOPT-NEXT: s_endpgm
entry:
; Write tid to out[tid], then the first barrier pair.
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
store i32 %tid, ptr addrspace(1) %addr
call void @llvm.amdgcn.s.barrier.signal(i32 -1)
call void @llvm.amdgcn.s.barrier.wait(i16 -1)
; First round: load out[size-1-tid] and store it to out[tid]. The index
; math is barrier-independent, so in the OPT run it is scheduled between
; the first signal and wait.
%idx1_base = sub i32 %size, 1
%idx1 = sub i32 %idx1_base, %tid
%read_addr1 = getelementptr i32, ptr addrspace(1) %out, i32 %idx1
%val1 = load i32, ptr addrspace(1) %read_addr1
store i32 %val1, ptr addrspace(1) %addr
call void @llvm.amdgcn.s.barrier.signal(i32 -1)
call void @llvm.amdgcn.s.barrier.wait(i16 -1)
; Second round: load out[size-2-tid] (codegen folds this into the first
; address plus offset:-4) and store to out[tid]. No independent work remains
; for the second barrier pair, so signal/wait stay adjacent in both runs.
%idx2_base = sub i32 %size, 2
%idx2 = sub i32 %idx2_base, %tid
%read_addr2 = getelementptr i32, ptr addrspace(1) %out, i32 %idx2
%val2 = load i32, ptr addrspace(1) %read_addr2
store i32 %val2, ptr addrspace(1) %addr
ret void
}
declare void @llvm.amdgcn.s.barrier.signal(i32) #1
declare void @llvm.amdgcn.s.barrier.wait(i16) #1
declare i32 @llvm.amdgcn.workitem.id.x() #2
attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }
attributes #2 = { nounwind readnone }