; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 < %s | FileCheck --check-prefix=GFX12 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 -mattr=+cumode < %s | FileCheck --check-prefix=GFX12 %s

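; Last-use load through a uniform pointer: the checks expect a scalar
; s_load_b32 with no last-use (TH_LOAD_LU) cache policy, since the uniform
; address lets the load be selected as an SMEM load.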
define amdgpu_kernel void @global_last_use_load_0(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; GFX12-LABEL: global_last_use_load_0:
; GFX12:       ; %bb.0: ; %entry
; GFX12-NEXT:    s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8
; GFX12-NEXT:    v_mov_b32_e32 v0, 0
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    s_load_b32 s2, s[2:3], 0x0
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    v_mov_b32_e32 v1, s2
; GFX12-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX12-NEXT:    s_endpgm
entry:
  %val = load i32, ptr addrspace(1) %in, align 4, !amdgpu.last.use !{}
  store i32 %val, ptr addrspace(1) %out
  ret void
}

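; Last-use load through a divergent per-lane pointer: the load stays a vector
; global_load_b32, and the checks expect the !amdgpu.last.use hint to lower
; to the th:TH_LOAD_LU cache policy.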
define amdgpu_kernel void @global_last_use_load_1(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; GFX12-LABEL: global_last_use_load_1:
; GFX12:       ; %bb.0: ; %entry
; GFX12-NEXT:    v_mov_b32_e32 v1, v0
; GFX12-NEXT:    s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8
; GFX12-NEXT:    v_mov_b32_e32 v0, 0
; GFX12-NEXT:    s_mov_b32 s4, 0x3ff
; GFX12-NEXT:    v_and_b32_e64 v1, v1, s4
; GFX12-NEXT:    s_mov_b32 s4, 2
; GFX12-NEXT:    s_wait_alu 0xfffe
; GFX12-NEXT:    v_lshlrev_b32_e64 v1, s4, v1
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    global_load_b32 v1, v1, s[2:3] th:TH_LOAD_LU
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX12-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %val.gep = getelementptr inbounds i32, ptr addrspace(1) %in, i32 %tid
  %val = load i32, ptr addrspace(1) %val.gep, align 4, !amdgpu.last.use !{}
  store i32 %val, ptr addrspace(1) %out
  ret void
}

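; Volatile combined with !amdgpu.last.use: the checks expect volatile to take
; precedence, emitting th:TH_LOAD_BYPASS scope:SCOPE_SYS rather than
; th:TH_LOAD_LU.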
define amdgpu_kernel void @global_last_use_and_volatile_load(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; GFX12-LABEL: global_last_use_and_volatile_load:
; GFX12:       ; %bb.0: ; %entry
; GFX12-NEXT:    v_mov_b32_e32 v0, 0
; GFX12-NEXT:    s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    global_load_b32 v1, v0, s[2:3] th:TH_LOAD_BYPASS scope:SCOPE_SYS
; GFX12-NEXT:    s_wait_bvhcnt 0x0
; GFX12-NEXT:    s_wait_samplecnt 0x0
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX12-NEXT:    s_endpgm
entry:
  %val = load volatile i32, ptr addrspace(1) %in, align 4, !amdgpu.last.use !{}
  store i32 %val, ptr addrspace(1) %out
  ret void
}

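; !amdgpu.last.use combined with !nontemporal: the checks expect the last-use
; hint to win, emitting th:TH_LOAD_LU rather than the nontemporal
; th:TH_LOAD_NT policy.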
define amdgpu_kernel void @global_last_use_and_nontemporal_load(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; GFX12-LABEL: global_last_use_and_nontemporal_load:
; GFX12:       ; %bb.0: ; %entry
; GFX12-NEXT:    v_mov_b32_e32 v1, v0
; GFX12-NEXT:    s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8
; GFX12-NEXT:    v_mov_b32_e32 v0, 0
; GFX12-NEXT:    s_mov_b32 s4, 0x3ff
; GFX12-NEXT:    v_and_b32_e64 v1, v1, s4
; GFX12-NEXT:    s_mov_b32 s4, 2
; GFX12-NEXT:    s_wait_alu 0xfffe
; GFX12-NEXT:    v_lshlrev_b32_e64 v1, s4, v1
; GFX12-NEXT:    s_wait_kmcnt 0x0
; GFX12-NEXT:    global_load_b32 v1, v1, s[2:3] th:TH_LOAD_LU
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX12-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %val.gep = getelementptr inbounds i32, ptr addrspace(1) %in, i32 %tid
  %val = load i32, ptr addrspace(1) %val.gep, align 4, !amdgpu.last.use !{}, !nontemporal !0
  store i32 %val, ptr addrspace(1) %out
  ret void
}

!0 = !{i32 1}

declare i32 @llvm.amdgcn.workitem.id.x()