| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefixes=CHECK,DAGISEL |
| ; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s --check-prefixes=CHECK,DAGISEL |
| ; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefixes=CHECK,GISEL |
| ; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s --check-prefixes=CHECK,GISEL |
| |
; Test that in dynamic VGPR mode, the value returned by llvm.sponentry points past the area reserved for CWSR.
| |
; Basic amdgpu_cs case, dVGPR block size 16 (#0): sponentry must select
; 0x1c0 when the wave is actually in dynamic-VGPR mode (the
; HW_REG_WAVE_HW_ID2 field read below is nonzero) and 0 otherwise —
; presumably 0x1c0 is the size of the CWSR reserved area for this block
; size (see the file header note).
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_16(i32 %val) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x1c0
; CHECK-NEXT: ; return to shader part epilog
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 8 |
| |
; Same as above but with dVGPR block size 32 (#1): the selected offset
; doubles to 0x380, confirming the reserved-area size depends on the
; "amdgpu-dynamic-vgpr-block-size" attribute.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_32(i32 %val) #1 {
; CHECK-LABEL: sponentry_cs_dvgpr_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x380
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x380
; CHECK-NEXT: ; return to shader part epilog
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 8 |
| |
| ; If we're not in dynamic VGPR mode, then sponentry can just return 0. |
| |
; Block size 0 (#2) disables dynamic VGPR mode, so no hwreg probe is
; emitted and sponentry folds to a plain 0 in s0.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_no_dvgpr(i32 %val) #2 {
; CHECK-LABEL: sponentry_cs_no_dvgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: scratch_store_b32 off, v0, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: ; return to shader part epilog
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 8 |
| |
; sponentry called under divergent control flow: it is materialized inside
; the conditional block (%if.then) and merged with the %ptr argument via a
; phi, so the result comes back through a VGPR and a readfirstlane.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_control_flow(i32 %val, ptr addrspace(5) %ptr) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_control_flow:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_mov_b32 s0, exec_lo
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: v_cmpx_gt_i32_e32 0x43, v0
; CHECK-NEXT: ; %bb.1: ; %if.then
; CHECK-NEXT: s_getreg_b32 s1, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; CHECK-NEXT: s_cmp_lg_u32 0, s1
; CHECK-NEXT: s_cmovk_i32 s1, 0x1c0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: ; %bb.2: ; %if.end
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: s_wait_alu depctr_va_sdst(0)
; CHECK-NEXT: ; return to shader part epilog
entry:
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%which = icmp slt i32 %val, 67
br i1 %which, label %if.then, label %if.end

if.then:
%stack.base = call ptr addrspace(5) @llvm.sponentry()
br label %if.end

if.end:
%ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
ret ptr addrspace(5) %ret
}
| |
| ; CHECK: ScratchSize: 8 |
| |
| declare amdgpu_gfx i32 @callee() |
| |
; sponentry combined with an amdgpu_gfx call: the stack pointer s32 is
; selected as 0x1d0/16 (entry offset plus this function's frame) before
; the call, while sponentry itself still yields 0x1c0/0. DAGISEL and
; GISEL checks are split only because the two selectors schedule the
; callee@abs32 address materialization differently.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_calls(i32 %val) #0 {
; DAGISEL-LABEL: sponentry_cs_dvgpr_calls:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_mov_b32 s1, callee@abs32@hi
; DAGISEL-NEXT: s_cmp_lg_u32 0, s33
; DAGISEL-NEXT: s_mov_b32 s0, callee@abs32@lo
; DAGISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; DAGISEL-NEXT: s_cselect_b32 s32, 0x1d0, 16
; DAGISEL-NEXT: s_swappc_b64 s[30:31], s[0:1]
; DAGISEL-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_cmp_lg_u32 0, s0
; DAGISEL-NEXT: s_cmovk_i32 s0, 0x1c0
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: sponentry_cs_dvgpr_calls:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_mov_b32 s0, callee@abs32@lo
; GISEL-NEXT: s_cmp_lg_u32 0, s33
; GISEL-NEXT: s_mov_b32 s1, callee@abs32@hi
; GISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; GISEL-NEXT: s_cselect_b32 s32, 0x1d0, 16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[0:1]
; GISEL-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_cmp_lg_u32 0, s0
; GISEL-NEXT: s_cmovk_i32 s0, 0x1c0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: ; return to shader part epilog
%local = alloca i32, addrspace(5)
%res = call amdgpu_gfx i32 @callee()
store volatile i32 %res, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 16 |
| |
; Overaligned stack object (align 128): sponentry selects 0x200, i.e. the
; 0x1c0 reserved area rounded up for the realignment (ScratchSize: 128
; below), while the conditional 0x?/0 structure is unchanged.
define amdgpu_cs ptr addrspace(5) @sponentry_cs_dvgpr_realign(i32 %val) #0 {
; CHECK-LABEL: sponentry_cs_dvgpr_realign:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; CHECK-NEXT: s_cmp_lg_u32 0, s33
; CHECK-NEXT: s_cmovk_i32 s33, 0x200
; CHECK-NEXT: s_cmp_lg_u32 0, s0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_cmovk_i32 s0, 0x200
; CHECK-NEXT: ; return to shader part epilog
%boop = alloca i32, addrspace(5)
%local = alloca i32, align 128, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 128 |
| |
; In an amdgpu_gfx function the stack pointer arrives in s32, so sponentry
; is simply the incoming s32 — no hwreg probe. Divergent control flow with
; a phi against the %ptr argument, as in the cs control-flow test above.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx(i32 %val, ptr addrspace(5) %ptr) #0 {
; DAGISEL-LABEL: sponentry_gfx:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: s_mov_b32 s0, exec_lo
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s32 offset:4 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v0
; DAGISEL-NEXT: ; %bb.1: ; %if.then
; DAGISEL-NEXT: v_mov_b32_e32 v1, s32
; DAGISEL-NEXT: ; %bb.2: ; %if.end
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; DAGISEL-NEXT: v_mov_b32_e32 v0, v1
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v2, v0
; GISEL-NEXT: v_mov_b32_e32 v0, v1
; GISEL-NEXT: s_mov_b32 s0, exec_lo
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v2, s32 offset:4 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v2
; GISEL-NEXT: ; %bb.1: ; %if.then
; GISEL-NEXT: s_mov_b32 s1, s32
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: v_mov_b32_e32 v0, s1
; GISEL-NEXT: ; %bb.2: ; %if.end
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%which = icmp slt i32 %val, 67
br i1 %which, label %if.then, label %if.end

if.then:
%stack.base = call ptr addrspace(5) @llvm.sponentry()
br label %if.end

if.end:
%ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
ret ptr addrspace(5) %ret
}
| |
| ; FIXME: Optimize away the 4 bytes for the sponentry frame index. |
| ; CHECK: ScratchSize: 12 |
| |
; amdgpu_gfx with an overaligned alloca: the prologue realigns the frame
; pointer (s33) and snapshots the incoming s32 into the base pointer s34;
; sponentry returns that saved s34 rather than the realigned value.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dvgpr_realign(i32 %val) #0 {
; CHECK-LABEL: sponentry_gfx_dvgpr_realign:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
; CHECK-NEXT: s_wait_expcnt 0x0
; CHECK-NEXT: s_wait_samplecnt 0x0
; CHECK-NEXT: s_wait_bvhcnt 0x0
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: s_mov_b32 s0, s33
; CHECK-NEXT: s_add_co_i32 s33, s32, 0x7f
; CHECK-NEXT: s_mov_b32 s1, s34
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: s_and_b32 s33, s33, 0xffffff80
; CHECK-NEXT: s_mov_b32 s34, s32
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 offset:128 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s34
; CHECK-NEXT: s_addk_co_i32 s32, 0x180
; CHECK-NEXT: s_mov_b32 s32, s34
; CHECK-NEXT: s_mov_b32 s34, s1
; CHECK-NEXT: s_mov_b32 s33, s0
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
%boop = alloca i32, addrspace(5)
%local = alloca i32, align 128, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; FIXME: Optimize away the sponentry fixed object. |
| ; CHECK: ScratchSize: 384 |
| |
; Arguments spilled to the stack (32 VGPR args fill the registers): the
; stack-passed %val/%ptr are loaded relative to s32, and sponentry is
; still the incoming s32. The two selectors merely assign v0/v1 to the
; two loads in opposite order.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_stack_args(<32 x i32> %fill.vgprs, i32 %val, ptr addrspace(5) %ptr) #0 {
; DAGISEL-LABEL: sponentry_gfx_stack_args:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: s_clause 0x1
; DAGISEL-NEXT: scratch_load_b32 v0, off, s32 offset:4
; DAGISEL-NEXT: scratch_load_b32 v1, off, s32
; DAGISEL-NEXT: s_wait_loadcnt 0x0
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 v0, v1, off scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_mov_b32_e32 v0, s32
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx_stack_args:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: s_clause 0x1
; GISEL-NEXT: scratch_load_b32 v0, off, s32
; GISEL-NEXT: scratch_load_b32 v1, off, s32 offset:4
; GISEL-NEXT: s_wait_loadcnt 0x0
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 v1, v0, off scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v0, s32
; GISEL-NEXT: s_setpc_b64 s[30:31]
store volatile i32 %val, ptr addrspace(5) %ptr
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 12 |
| |
; Dynamically-sized alloca in amdgpu_gfx: the wave-wide maximum allocation
; size is computed via the DPP max-reduction, s32 is bumped accordingly,
; and sponentry still returns the frame pointer s33 (the entry s32 saved
; in the prologue), not the dynamically adjusted stack pointer.
define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dyn_alloc(i32 %val) #0 {
; DAGISEL-LABEL: sponentry_gfx_dyn_alloc:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: s_mov_b32 s2, s33
; DAGISEL-NEXT: s_mov_b32 s33, s32
; DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
; DAGISEL-NEXT: s_clause 0x1 ; 8-byte Folded Spill
; DAGISEL-NEXT: scratch_store_b32 off, v1, s33 offset:4
; DAGISEL-NEXT: scratch_store_b32 off, v2, s33 offset:8
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_mov_b32 exec_lo, s0
; DAGISEL-NEXT: v_lshl_add_u32 v3, v0, 2, 15
; DAGISEL-NEXT: s_add_co_i32 s32, s32, 16
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_and_b32_e32 v3, -16, v3
; DAGISEL-NEXT: s_or_saveexec_b32 s0, -1
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_cndmask_b32_e64 v1, 0, v3, s0
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:1 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:2 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:4 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:8 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: ds_swizzle_b32 v2, v1 offset:swizzle(BROADCAST,32,15)
; DAGISEL-NEXT: s_wait_dscnt 0x0
; DAGISEL-NEXT: v_max_u32_e32 v1, v1, v2
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; DAGISEL-NEXT: v_readlane_b32 s1, v1, 31
; DAGISEL-NEXT: s_mov_b32 exec_lo, s0
; DAGISEL-NEXT: s_mov_b32 s0, s32
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_lshl_add_u32 v3, s1, 5, s0
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_mov_b32_e32 v0, s33
; DAGISEL-NEXT: v_readfirstlane_b32 s32, v2
; DAGISEL-NEXT: s_mov_b32 s32, s33
; DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1
; DAGISEL-NEXT: s_clause 0x1 ; 8-byte Folded Reload
; DAGISEL-NEXT: scratch_load_b32 v1, off, s33 offset:4
; DAGISEL-NEXT: scratch_load_b32 v2, off, s33 offset:8
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_mov_b32 exec_lo, s0
; DAGISEL-NEXT: s_mov_b32 s33, s2
; DAGISEL-NEXT: s_wait_loadcnt 0x0
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: sponentry_gfx_dyn_alloc:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: s_mov_b32 s3, s33
; GISEL-NEXT: s_mov_b32 s33, s32
; GISEL-NEXT: s_xor_saveexec_b32 s0, -1
; GISEL-NEXT: s_clause 0x1 ; 8-byte Folded Spill
; GISEL-NEXT: scratch_store_b32 off, v1, s33 offset:4
; GISEL-NEXT: scratch_store_b32 off, v2, s33 offset:8
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_mov_b32 exec_lo, s0
; GISEL-NEXT: v_lshl_add_u32 v3, v0, 2, 15
; GISEL-NEXT: s_add_co_i32 s32, s32, 16
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_mov_b32 s0, s32
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_and_b32_e32 v3, -16, v3
; GISEL-NEXT: s_or_saveexec_b32 s1, -1
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, v3, s1
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:1 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:2 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:4 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: v_max_u32_dpp v1, v1, v1 row_shr:8 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: ds_swizzle_b32 v2, v1 offset:swizzle(BROADCAST,32,15)
; GISEL-NEXT: s_wait_dscnt 0x0
; GISEL-NEXT: v_max_u32_e32 v1, v1, v2
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GISEL-NEXT: v_readlane_b32 s2, v1, 31
; GISEL-NEXT: s_mov_b32 exec_lo, s1
; GISEL-NEXT: s_lshl_b32 s1, s2, 5
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_add_co_u32 s32, s0, s1
; GISEL-NEXT: v_mov_b32_e32 v0, s33
; GISEL-NEXT: s_mov_b32 s32, s33
; GISEL-NEXT: s_xor_saveexec_b32 s0, -1
; GISEL-NEXT: s_clause 0x1 ; 8-byte Folded Reload
; GISEL-NEXT: scratch_load_b32 v1, off, s33 offset:4
; GISEL-NEXT: scratch_load_b32 v2, off, s33 offset:8
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_mov_b32 exec_lo, s0
; GISEL-NEXT: s_mov_b32 s33, s3
; GISEL-NEXT: s_wait_loadcnt 0x0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_setpc_b64 s[30:31]
%local = alloca i32, i32 %val, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
ret ptr addrspace(5) %stack.base
}
| |
| ; CHECK: ScratchSize: 16 |
| |
; amdgpu_cs_chain variant of the control-flow test: args arrive in v8/v9,
; sponentry is the incoming s32, and the kernel ends with s_alloc_vgpr 0
; and s_endpgm instead of a shader epilog return.
define amdgpu_cs_chain void @sponentry_cs_chain(i32 %val, ptr addrspace(5) %ptr) #0 {
; DAGISEL-LABEL: sponentry_cs_chain:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: v_mov_b32_e32 v0, v9
; DAGISEL-NEXT: s_mov_b32 s0, exec_lo
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v8, s32 offset:4 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v8
; DAGISEL-NEXT: ; %bb.1: ; %if.then
; DAGISEL-NEXT: v_mov_b32_e32 v0, s32
; DAGISEL-NEXT: ; %bb.2: ; %if.end
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; DAGISEL-NEXT: scratch_store_b32 v9, v0, off scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: s_alloc_vgpr 0
; DAGISEL-NEXT: s_endpgm
;
; GISEL-LABEL: sponentry_cs_chain:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: v_mov_b32_e32 v0, v9
; GISEL-NEXT: s_mov_b32 s0, exec_lo
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v8, s32 offset:4 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: v_cmpx_gt_i32_e32 0x43, v8
; GISEL-NEXT: ; %bb.1: ; %if.then
; GISEL-NEXT: s_mov_b32 s1, s32
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: v_mov_b32_e32 v0, s1
; GISEL-NEXT: ; %bb.2: ; %if.end
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: scratch_store_b32 v9, v0, off scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_alloc_vgpr 0
; GISEL-NEXT: s_endpgm
entry:
%local = alloca i32, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%which = icmp slt i32 %val, 67
br i1 %which, label %if.then, label %if.end

if.then:
%stack.base = call ptr addrspace(5) @llvm.sponentry()
br label %if.end

if.end:
%ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
store volatile ptr addrspace(5) %ret, ptr addrspace(5) %ptr
ret void
}
| |
| ; FIXME: Optimize away the 4 bytes for the sponentry frame index. |
| ; CHECK: ScratchSize: 12 |
| |
; cs_chain with an overaligned alloca: the entry s32 is saved to s34 before
; realignment, and sponentry returns that saved value (stored out via v0).
define amdgpu_cs_chain void @sponentry_cs_chain_dvgpr_realign(i32 %val) #0 {
; CHECK-LABEL: sponentry_cs_chain_dvgpr_realign:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
; CHECK-NEXT: s_wait_expcnt 0x0
; CHECK-NEXT: s_wait_samplecnt 0x0
; CHECK-NEXT: s_wait_bvhcnt 0x0
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: s_mov_b32 s34, s32
; CHECK-NEXT: s_add_co_i32 s33, s32, 0x7f
; CHECK-NEXT: s_wait_alu depctr_sa_sdst(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s34
; CHECK-NEXT: s_and_b32 s33, s33, 0xffffff80
; CHECK-NEXT: s_addk_co_i32 s32, 0x180
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b32 off, v8, s33 offset:128 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b32 off, v0, s33 offset:128 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: s_alloc_vgpr 0
; CHECK-NEXT: s_endpgm
%boop = alloca i32, addrspace(5)
%local = alloca i32, align 128, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
store volatile ptr addrspace(5) %stack.base, ptr addrspace(5) %local
ret void
}
| |
| ; FIXME: Optimize away the sponentry fixed object. |
| ; CHECK: ScratchSize: 384 |
| |
; cs_chain with a dynamically-sized alloca: same DPP wave-max reduction as
; the gfx dyn-alloc test; sponentry returns the entry s32 saved in s33
; before the dynamic stack bump.
define amdgpu_cs_chain void @sponentry_cs_chain_dyn_alloc(i32 %val) #0 {
; DAGISEL-LABEL: sponentry_cs_chain_dyn_alloc:
; DAGISEL: ; %bb.0:
; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; DAGISEL-NEXT: s_wait_expcnt 0x0
; DAGISEL-NEXT: s_wait_samplecnt 0x0
; DAGISEL-NEXT: s_wait_bvhcnt 0x0
; DAGISEL-NEXT: s_wait_kmcnt 0x0
; DAGISEL-NEXT: v_lshl_add_u32 v2, v8, 2, 15
; DAGISEL-NEXT: s_mov_b32 s33, s32
; DAGISEL-NEXT: s_add_co_i32 s32, s32, 16
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_and_b32_e32 v2, -16, v2
; DAGISEL-NEXT: s_or_saveexec_b32 s0, -1
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_cndmask_b32_e64 v0, 0, v2, s0
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:1 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:2 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; DAGISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:4 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:8 row_mask:0xf bank_mask:0xf
; DAGISEL-NEXT: ds_swizzle_b32 v1, v0 offset:swizzle(BROADCAST,32,15)
; DAGISEL-NEXT: s_wait_dscnt 0x0
; DAGISEL-NEXT: v_max_u32_e32 v0, v0, v1
; DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; DAGISEL-NEXT: v_readlane_b32 s1, v0, 31
; DAGISEL-NEXT: s_mov_b32 exec_lo, s0
; DAGISEL-NEXT: s_mov_b32 s0, s32
; DAGISEL-NEXT: v_mov_b32_e32 v3, s33
; DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; DAGISEL-NEXT: v_lshl_add_u32 v2, s1, 5, s0
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v8, s0 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: scratch_store_b32 off, v3, s0 scope:SCOPE_SYS
; DAGISEL-NEXT: s_wait_storecnt 0x0
; DAGISEL-NEXT: v_readfirstlane_b32 s32, v2
; DAGISEL-NEXT: s_alloc_vgpr 0
; DAGISEL-NEXT: s_endpgm
;
; GISEL-LABEL: sponentry_cs_chain_dyn_alloc:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_wait_expcnt 0x0
; GISEL-NEXT: s_wait_samplecnt 0x0
; GISEL-NEXT: s_wait_bvhcnt 0x0
; GISEL-NEXT: s_wait_kmcnt 0x0
; GISEL-NEXT: v_lshl_add_u32 v2, v8, 2, 15
; GISEL-NEXT: s_mov_b32 s33, s32
; GISEL-NEXT: s_add_co_i32 s32, s32, 16
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_mov_b32 s0, s32
; GISEL-NEXT: v_and_b32_e32 v2, -16, v2
; GISEL-NEXT: s_or_saveexec_b32 s1, -1
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, v2, s1
; GISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:1 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:2 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:4 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_max_u32_dpp v0, v0, v0 row_shr:8 row_mask:0xf bank_mask:0xf
; GISEL-NEXT: ds_swizzle_b32 v1, v0 offset:swizzle(BROADCAST,32,15)
; GISEL-NEXT: s_wait_dscnt 0x0
; GISEL-NEXT: v_max_u32_e32 v0, v0, v1
; GISEL-NEXT: v_readlane_b32 s2, v0, 31
; GISEL-NEXT: s_mov_b32 exec_lo, s1
; GISEL-NEXT: v_mov_b32_e32 v2, s33
; GISEL-NEXT: s_lshl_b32 s1, s2, 5
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: scratch_store_b32 off, v8, s0 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
; GISEL-NEXT: s_add_co_u32 s32, s0, s1
; GISEL-NEXT: scratch_store_b32 off, v2, s0 scope:SCOPE_SYS
; GISEL-NEXT: s_wait_storecnt 0x0
; GISEL-NEXT: s_alloc_vgpr 0
; GISEL-NEXT: s_endpgm
%local = alloca i32, i32 %val, addrspace(5)
store volatile i32 %val, ptr addrspace(5) %local
%stack.base = call ptr addrspace(5) @llvm.sponentry()
store volatile ptr addrspace(5) %stack.base, ptr addrspace(5) %local
ret void
}
| |
| ; CHECK: ScratchSize: 16 |
| |
; Attribute sets controlling dynamic VGPR mode:
;   #0: dVGPR block size 16, #1: dVGPR block size 32,
;   #2: block size 0, i.e. dynamic VGPR mode disabled.
attributes #0 = { nounwind "amdgpu-dynamic-vgpr-block-size"="16" }
attributes #1 = { nounwind "amdgpu-dynamic-vgpr-block-size"="32" }
attributes #2 = { nounwind "amdgpu-dynamic-vgpr-block-size"="0" }