; blob: 3ebfdcff4309fa9b9af863df0e0e12b40ef2590b
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefixes=CHECK,DAGISEL
; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s --check-prefixes=CHECK,DAGISEL
; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefixes=CHECK,GISEL
; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s --check-prefixes=CHECK,GISEL
; Test that in dynamic VGPR mode, the value returned by sponentry points past the area reserved for CWSR.
; Attribute #0 enables dynamic VGPRs with block size 16 (see the attributes at
; the bottom of the file); the 0x1c0 offset in the checks is presumably the
; size of the CWSR reserved area for that configuration -- confirm against the
; backend if it changes. Checks are autogenerated; do not edit by hand.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_16(i32 %val) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x1c0
; CHECK-NEXT: ; return to shader part epilog
; The volatile store forces a real stack object so the function has scratch.
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 8
; Same as sponentry_cs_dvgpr_16 but with attribute #1 (dynamic VGPR block size
; 32); the expected offset doubles to 0x380, presumably matching the larger
; CWSR reserved area for that block size -- confirm against the backend.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_32(i32 %val) #1 {
; CHECK-LABEL: sponentry_cs_dvgpr_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x380
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x380
; CHECK-NEXT: ; return to shader part epilog
; The volatile store forces a real stack object so the function has scratch.
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 8
; If we're not in dynamic VGPR mode, then sponentry can just return 0.
; Attribute #2 sets dynamic-vgpr-block-size=0 (dynamic VGPRs disabled), so no
; CWSR area is reserved and sponentry simply returns 0 (s0 = 0, store at off).
define amdgpu_cs ptr addrspace(5) @sponentry_cs_no_dvgpr(i32 %val) #2 {
; CHECK-LABEL: sponentry_cs_no_dvgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: scratch_store_b32 off, v0, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: ; return to shader part epilog
; The volatile store forces a real stack object so the function has scratch.
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 8
; sponentry is called only on the taken side of a divergent branch and merged
; with an incoming pointer through a phi; checks that the lowering still works
; when the intrinsic is not in the entry block.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_control_flow(i32 %val, ptr addrspace(5) %ptr) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_control_flow:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_mov_b32 s0, exec_lo
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: v_cmpx_gt_i32_e32 0x43, v0
; CHECK-NEXT: ; %bb.1: ; %if.then
; CHECK-NEXT: s_getreg_b32 s1, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; CHECK-NEXT: s_cmp_lg_u32 0, s1
; CHECK-NEXT: s_cmovk_i32 s1, 0x1c0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: ; %bb.2: ; %if.end
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: s_wait_alu depctr_va_sdst(0)
; CHECK-NEXT: ; return to shader part epilog
entry:
; The volatile store forces a real stack object so the function has scratch.
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%which = icmp slt i32 %val, 67
br i1 %which, label %if.then, label %if.end
if.then:
; sponentry only on this path; result merged with %ptr at the join below.
%stack.base = call ptr addrspace(5) @llvm.sponentry()
br label %if.end
if.end:
%ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
ret ptr addrspace(5) %ret
}
; CHECK: ScratchSize: 8
declare amdgpu_gfx i32 @callee()
; A function that makes an amdgpu_gfx call: the stack pointer s32 must be set
; up past the CWSR reserved area (0x1c0 + locals -> 0x1d0 when dVGPR is on,
; plain 16 otherwise, hence the s_cselect), and sponentry must still return
; the frame base after the call.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_calls(i32 %val) #0 {
; DAGISEL-LABEL: sponentry_cs_dvgpr_calls:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_mov_b32 s1, callee@abs32@hi
; DAGISEL-NEXT: s_cmp_lg_u32 0, s33
; DAGISEL-NEXT: s_mov_b32 s0, callee@abs32@lo
; DAGISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; DAGISEL-NEXT: s_cselect_b32 s32, 0x1d0, 16
; DAGISEL-NEXT: s_swappc_b64 s[30:31], s[0:1]
; DAGISEL-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_cmp_lg_u32 0, s0
; DAGISEL-NEXT: s_cmovk_i32 s0, 0x1c0
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: sponentry_cs_dvgpr_calls:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_mov_b32 s0, callee@abs32@lo
; GISEL-NEXT: s_cmp_lg_u32 0, s33
; GISEL-NEXT: s_mov_b32 s1, callee@abs32@hi
; GISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; GISEL-NEXT: s_cselect_b32 s32, 0x1d0, 16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[0:1]
; GISEL-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_cmp_lg_u32 0, s0
; GISEL-NEXT: s_cmovk_i32 s0, 0x1c0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: ; return to shader part epilog
%local = alloca i32, addrspace(5)
%res = call amdgpu_gfx i32 @callee()
store volatile i32 %res, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 16
; Over-aligned alloca (align 128) on top of the CWSR area: the expected base
; moves from 0x1c0 up to the realigned 0x200.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_realign(i32 %val) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_realign:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x200
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x200
; CHECK-NEXT: ; return to shader part epilog
; %boop just occupies some stack before the over-aligned object.
%boop = alloca i32, addrspace(5)
%local = alloca i32, align 128, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 128
; amdgpu_gfx function: here sponentry is expected to produce the incoming
; stack pointer (s32) rather than a CWSR-adjusted constant, including when the
; call sits behind divergent control flow and is merged through a phi.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx(i32 %val, ptr addrspace(5) %ptr) #0 {
; DAGISEL-LABEL: sponentry_gfx:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: s_mov_b32 s0, exec_lo
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s32 offset:4 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v0
; DAGISEL-NEXT: ; %bb.1: ; %if.then
; DAGISEL-NEXT: v_mov_b32_e32 v1, s32
; DAGISEL-NEXT: ; %bb.2: ; %if.end
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; DAGISEL-NEXT: v_mov_b32_e32 v0, v1
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v2, v0
; GISEL-NEXT: v_mov_b32_e32 v0, v1
; GISEL-NEXT: s_mov_b32 s0, exec_lo
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v2, s32 offset:4 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v2
; GISEL-NEXT: ; %bb.1: ; %if.then
; GISEL-NEXT: s_mov_b32 s1, s32
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: v_mov_b32_e32 v0, s1
; GISEL-NEXT: ; %bb.2: ; %if.end
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
; The volatile store forces a real stack object so the function has scratch.
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%which = icmp slt i32 %val, 67
br i1 %which, label %if.then, label %if.end
if.then:
%stack.base = call ptr addrspace(5) @llvm.sponentry()
br label %if.end
if.end:
%ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
ret ptr addrspace(5) %ret
}
; FIXME: Optimize away the 4 bytes for the sponentry frame index.
; CHECK: ScratchSize: 12
; amdgpu_gfx with an over-aligned alloca: the prologue realigns the frame
; (s33 = align_up(s32, 128) via the add 0x7f / and 0xffffff80 pair) and
; sponentry returns the saved base pointer s34 rather than a constant.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dvgpr_realign(i32 %val) #0 {
; CHECK-LABEL: sponentry_gfx_dvgpr_realign:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
; CHECK-NEXT: s_wait_expcnt 0x0
; CHECK-NEXT: s_wait_samplecnt 0x0
; CHECK-NEXT: s_wait_bvhcnt 0x0
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: s_mov_b32 s0, s33
; CHECK-NEXT: s_add_co_i32 s33, s32, 0x7f
; CHECK-NEXT: s_mov_b32 s1, s34
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: s_and_b32 s33, s33, 0xffffff80
; CHECK-NEXT: s_mov_b32 s34, s32
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 offset:128 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s34
; CHECK-NEXT: s_addk_co_i32 s32, 0x180
; CHECK-NEXT: s_mov_b32 s32, s34
; CHECK-NEXT: s_mov_b32 s34, s1
; CHECK-NEXT: s_mov_b32 s33, s0
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
; %boop just occupies some stack before the over-aligned object.
%boop = alloca i32, addrspace(5)
%local = alloca i32, align 128, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; FIXME: Optimize away the sponentry fixed object.
; CHECK: ScratchSize: 384
; The <32 x i32> filler presumably exhausts the argument registers so %val
; and %ptr are passed on the stack (the two scratch_load_b32 from s32) --
; sponentry must still return the incoming stack pointer s32.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_stack_args(<32 x i32> %fill.sgprs, i32 %val, ptr addrspace(5) %ptr) #0 {
; DAGISEL-LABEL: sponentry_gfx_stack_args:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: s_clause 0x1
; DAGISEL-NEXT: scratch_load_b32 v0, off, s32 offset:4
; DAGISEL-NEXT: scratch_load_b32 v1, off, s32
; DAGISEL-NEXT: s_wait_loadcnt 0x0
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 v0, v1, off scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_mov_b32_e32 v0, s32
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx_stack_args:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: s_clause 0x1
; GISEL-NEXT: scratch_load_b32 v0, off, s32
; GISEL-NEXT: scratch_load_b32 v1, off, s32 offset:4
; GISEL-NEXT: s_wait_loadcnt 0x0
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 v1, v0, off scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v0, s32
; GISEL-NEXT: s_setpc_b64 s[30:31]
store volatile i32 %val, ptr addrspace(5) %ptr
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 12
; Dynamically-sized alloca: a frame pointer (s33) is set up and the expected
; code computes the wave-wide maximum allocation size across lanes (the
; readlane loop) before bumping s32; sponentry should return the frame base
; s33, not the post-alloca stack pointer.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dyn_alloc(i32 %val) #0 {
; DAGISEL-LABEL: sponentry_gfx_dyn_alloc:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: v_lshl_add_u32 v1, v0, 2, 15
; DAGISEL-NEXT: s_mov_b32 s34, s33
; DAGISEL-NEXT: s_mov_b32 s1, exec_lo
; DAGISEL-NEXT: s_mov_b32 s0, 0
; DAGISEL-NEXT: s_mov_b32 s33, s32
; DAGISEL-NEXT: v_and_b32_e32 v1, -16, v1
; DAGISEL-NEXT: s_add_co_i32 s32, s32, 16
; DAGISEL-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_ctz_i32_b32 s2, s1
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_readlane_b32 s3, v1, s2
; DAGISEL-NEXT: s_bitset0_b32 s1, s2
; DAGISEL-NEXT: s_max_u32 s0, s0, s3
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_cmp_lg_u32 s1, 0
; DAGISEL-NEXT: s_cbranch_scc1 .LBB9_1
; DAGISEL-NEXT: ; %bb.2:
; DAGISEL-NEXT: s_mov_b32 s1, s32
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_lshl_add_u32 v1, s0, 5, s1
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s1 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_mov_b32_e32 v0, s33
; DAGISEL-NEXT: v_readfirstlane_b32 s32, v1
; DAGISEL-NEXT: s_mov_b32 s32, s33
; DAGISEL-NEXT: s_mov_b32 s33, s34
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx_dyn_alloc:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: v_lshl_add_u32 v1, v0, 2, 15
; GISEL-NEXT: s_mov_b32 s34, s33
; GISEL-NEXT: s_mov_b32 s1, exec_lo
; GISEL-NEXT: s_mov_b32 s0, 0
; GISEL-NEXT: s_mov_b32 s33, s32
; GISEL-NEXT: v_and_b32_e32 v1, -16, v1
; GISEL-NEXT: s_add_co_i32 s32, s32, 16
; GISEL-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_ctz_i32_b32 s2, s1
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: v_readlane_b32 s3, v1, s2
; GISEL-NEXT: s_bitset0_b32 s1, s2
; GISEL-NEXT: s_max_u32 s0, s0, s3
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_cmp_lg_u32 s1, 0
; GISEL-NEXT: s_cbranch_scc1 .LBB9_1
; GISEL-NEXT: ; %bb.2:
; GISEL-NEXT: s_mov_b32 s1, s32
; GISEL-NEXT: s_lshl_b32 s0, s0, 5
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v0, s1 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v0, s33
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_add_co_u32 s32, s1, s0
; GISEL-NEXT: s_mov_b32 s32, s33
; GISEL-NEXT: s_mov_b32 s33, s34
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_setpc_b64 s[30:31]
; %val-sized alloca: size is only known at run time.
%local = alloca i32, i32 %val, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
; CHECK: ScratchSize: 16
attributes #0 = { nounwind "amdgpu-dynamic-vgpr-block-size"="16" }
attributes #1 = { nounwind "amdgpu-dynamic-vgpr-block-size"="32" }
attributes #2 = { nounwind "amdgpu-dynamic-vgpr-block-size"="0" }