| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx950 < %s | FileCheck %s |
| |
| ; Intrinsics used by the test: a compiler scheduling barrier and a direct |
| ; global->LDS load (in, lds_out, size, offset, aux operands). |
| declare void @llvm.amdgcn.sched.barrier(i32 %mask) |
| declare void @llvm.amdgcn.load.to.lds(ptr %in, ptr addrspace(3) %lds_out, i32 %size, i32 %offset, i32 %aux) |
| |
| define amdgpu_kernel void @test_waitcnt(ptr addrspace(1) %global_buffer, ptr addrspace(3) %lds_buffer1, ptr addrspace(3) %lds_buffer2) #0 { |
| ; This test checks if SIInsertWaitcnts pass inserts S_WAITCNT VMCNT(0) before DS_READ |
| ; NOTE(review): the CHECK block below is autogenerated by |
| ; update_llc_test_checks.py -- regenerate it rather than editing by hand. |
| ; CHECK-LABEL: test_waitcnt: |
| ; CHECK: ; %bb.0: ; %entry |
| ; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 |
| ; CHECK-NEXT: v_mov_b32_e32 v0, 0 |
| ; CHECK-NEXT: s_waitcnt lgkmcnt(0) |
| ; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0 |
| ; CHECK-NEXT: s_add_u32 s4, s0, 64 |
| ; CHECK-NEXT: s_addc_u32 s5, s1, 0 |
| ; CHECK-NEXT: s_mov_b32 m0, s2 |
| ; CHECK-NEXT: s_waitcnt lgkmcnt(0) |
| ; CHECK-NEXT: v_mov_b32_e32 v3, s6 |
| ; CHECK-NEXT: global_store_dword v0, v3, s[0:1] offset:64 |
| ; CHECK-NEXT: global_load_lds_dword v0, s[4:5] offset:4 |
| ; CHECK-NEXT: ; sched_barrier mask(0x00000000) |
| ; CHECK-NEXT: v_mov_b32_e32 v1, s2 |
| ; CHECK-NEXT: v_mov_b32_e32 v2, s3 |
| ; CHECK-NEXT: ds_write_b32 v1, v3 |
| ; CHECK-NEXT: ds_write_b32 v2, v3 |
| ; CHECK-NEXT: ; sched_barrier mask(0x00000000) |
| ; CHECK-NEXT: ds_read_b32 v1, v1 |
| ; CHECK-NEXT: s_waitcnt lgkmcnt(0) |
| ; CHECK-NEXT: global_store_dword v0, v1, s[0:1] offset:16 |
| ; CHECK-NEXT: global_store_dword v0, v3, s[0:1] offset:32 |
| ; CHECK-NEXT: s_endpgm |
| entry: |
| ; VMEM accesses with alias.scope |
| ; Load from global_buffer, then store the value back at i32 index 16 |
| ; (byte offset 64); the store carries the vmem scope list !0. |
| %vmem_load = load i32, ptr addrspace(1) %global_buffer |
| %gepvmem = getelementptr i32, ptr addrspace(1) %global_buffer, i32 16 |
| store i32 %vmem_load, ptr addrspace(1) %gepvmem, align 4, !alias.scope !0 |
| |
| ; Global to LDS load |
| ; Direct-to-LDS copy: 4 bytes from %gepvmem (flat-cast) into lds_buffer1 |
| ; at offset 4. Scoped by !9 (lds1_off4) and declared noalias (!14) with |
| ; both DS writes below (lds1 and lds2 scopes). |
| %gepvmem.ascast = addrspacecast ptr addrspace(1) %gepvmem to ptr |
| call void @llvm.amdgcn.load.to.lds(ptr %gepvmem.ascast, ptr addrspace(3) %lds_buffer1, i32 4, i32 4, i32 0), !alias.scope !9, !noalias !14 |
| |
| ; Insert scheduling barrier |
| ; (mask 0: no instructions may be scheduled across the barrier, pinning |
| ; the memory ops on either side in program order) |
| call void @llvm.amdgcn.sched.barrier(i32 0) |
| |
| ; DS_WRITEs with alias.scope and noalias |
| store i32 %vmem_load, ptr addrspace(3) %lds_buffer1, align 4, !alias.scope !1, !noalias !12 |
| store i32 %vmem_load, ptr addrspace(3) %lds_buffer2, align 4, !alias.scope !6, !noalias !13 |
| |
| ; Insert scheduling barrier |
| call void @llvm.amdgcn.sched.barrier(i32 0) |
| |
| ; DS_READ with alias.scope missing |
| ; The load carries only !noalias !12 (lds2 + lds1_off4 scopes) and, |
| ; deliberately, no !alias.scope of its own. |
| %lds_load = load i32, ptr addrspace(3) %lds_buffer1, align 4, !noalias !12 |
| |
| ; VMEM write |
| ; Store results back into the global buffer at byte offsets 16 and 32. |
| %gep = getelementptr i32, ptr addrspace(1) %global_buffer, i32 4 |
| %gep2 = getelementptr i32, ptr addrspace(1) %global_buffer, i32 8 |
| store i32 %lds_load, ptr addrspace(1) %gep, align 4, !alias.scope !0 |
| store i32 %vmem_load, ptr addrspace(1) %gep2, align 4, !alias.scope !0 |
| |
| ret void |
| } |
| |
| ; Alias-scope metadata. Each domain node names one domain, each scope node |
| ; names a scope within its domain, and each list node (!0, !1, !6, !9) is |
| ; the one-element scope list actually attached to an instruction. |
| ; VMEM alias domain and scope |
| !5 = !{!"vmem.domain"} |
| !4 = !{!"vmem.scope", !5} |
| !0 = !{!4} |
| |
| ; LDS alias domains and scopes |
| !3 = !{!"lds1.domain"} |
| !2 = !{!"lds1.scope", !3} |
| !1 = !{!2} |
| |
| !8 = !{!"lds2.domain"} |
| !7 = !{!"lds2.scope", !8} |
| !6 = !{!7} |
| |
| !11 = !{!"lds1_off4.domain"} |
| !10 = !{!"lds1_off4.scope", !11} |
| !9 = !{!10} |
| |
| ; Noalias lists |
| ; !12 = lds2 + lds1_off4, !13 = lds1 + lds1_off4, !14 = lds1 + lds2 |
| !12 = !{!7, !10} |
| !13 = !{!2, !10} |
| !14 = !{!2, !7} |
| |
| ; Kernel attributes |
| attributes #0 = { nounwind } |