; (removed stray "blob: <hash> [file] [edit]" web-scrape header — not valid LLVM IR)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1250 < %s | FileCheck %s --check-prefix=GISEL
; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1250 < %s | FileCheck %s --check-prefix=DAGISEL
; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn--amdpal -mcpu=gfx1250 < %s | FileCheck %s --check-prefix=NRBS
; Dynamic-VGPR allocation intrinsic: the i32 operand is the requested VGPR
; count. The i1 result is materialized from SCC after s_alloc_vgpr (see the
; s_cselect on s0 in the CHECK lines below), presumably reporting whether the
; allocation succeeded — confirm against the AMDGPU intrinsic docs.
declare i1 @llvm.amdgcn.s.alloc.vgpr(i32)
; Constant-count case: checks that a literal i32 45 is emitted as an immediate
; operand of s_alloc_vgpr in all three selectors (GlobalISel, SelectionDAG,
; and GlobalISel with -new-reg-bank-select).
define amdgpu_cs void @test_alloc_vreg_const(ptr addrspace(1) %out) #0 {
; GISEL-LABEL: test_alloc_vreg_const:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; GISEL-NEXT: s_cmp_lg_u32 0, s33
; GISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; GISEL-NEXT: s_alloc_vgpr 45
; GISEL-NEXT: s_cselect_b32 s0, 1, 0
; GISEL-NEXT: s_and_b32 s0, s0, 1
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GISEL-NEXT: v_mov_b32_e32 v2, s0
; GISEL-NEXT: global_store_b32 v[0:1], v2, off
; GISEL-NEXT: s_alloc_vgpr 0
; GISEL-NEXT: s_endpgm
;
; DAGISEL-LABEL: test_alloc_vreg_const:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; DAGISEL-NEXT: s_cmp_lg_u32 0, s33
; DAGISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; DAGISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; DAGISEL-NEXT: s_alloc_vgpr 45
; DAGISEL-NEXT: s_cselect_b32 s0, -1, 0
; DAGISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
; DAGISEL-NEXT: global_store_b32 v[0:1], v2, off
; DAGISEL-NEXT: s_alloc_vgpr 0
; DAGISEL-NEXT: s_endpgm
;
; NRBS-LABEL: test_alloc_vreg_const:
; NRBS: ; %bb.0: ; %entry
; NRBS-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; NRBS-NEXT: s_cmp_lg_u32 0, s33
; NRBS-NEXT: s_cmovk_i32 s33, 0x1c0
; NRBS-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; NRBS-NEXT: s_alloc_vgpr 45
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: s_and_b32 s0, s0, 1
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; NRBS-NEXT: s_cmp_lg_u32 s0, 0
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: v_mov_b32_e32 v2, s0
; NRBS-NEXT: global_store_b32 v[0:1], v2, off
; NRBS-NEXT: s_alloc_vgpr 0
; NRBS-NEXT: s_endpgm
entry:
; Allocate 45 VGPRs; the i1 result is read back from SCC (s_cselect in the
; CHECK lines above).
%scc = call i1 @llvm.amdgcn.s.alloc.vgpr(i32 45)
; Store 1 on success / 0 on failure so the SCC-derived value is observable.
%sel = select i1 %scc, i32 1, i32 0
store i32 %sel, ptr addrspace(1) %out
ret void
}
; Variable-count case with a uniform (inreg/SGPR) operand: checks that the
; requested count is passed to s_alloc_vgpr in an SGPR (s0) rather than as an
; immediate.
define amdgpu_cs void @test_alloc_vreg_var(i32 inreg %n, ptr addrspace(1) %out) #0 {
; GISEL-LABEL: test_alloc_vreg_var:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; GISEL-NEXT: s_cmp_lg_u32 0, s33
; GISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; GISEL-NEXT: s_alloc_vgpr s0
; GISEL-NEXT: s_cselect_b32 s0, 1, 0
; GISEL-NEXT: s_and_b32 s0, s0, 1
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GISEL-NEXT: v_mov_b32_e32 v2, s0
; GISEL-NEXT: global_store_b32 v[0:1], v2, off
; GISEL-NEXT: s_alloc_vgpr 0
; GISEL-NEXT: s_endpgm
;
; DAGISEL-LABEL: test_alloc_vreg_var:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; DAGISEL-NEXT: s_cmp_lg_u32 0, s33
; DAGISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; DAGISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; DAGISEL-NEXT: s_alloc_vgpr s0
; DAGISEL-NEXT: s_cselect_b32 s0, -1, 0
; DAGISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
; DAGISEL-NEXT: global_store_b32 v[0:1], v2, off
; DAGISEL-NEXT: s_alloc_vgpr 0
; DAGISEL-NEXT: s_endpgm
;
; NRBS-LABEL: test_alloc_vreg_var:
; NRBS: ; %bb.0: ; %entry
; NRBS-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; NRBS-NEXT: s_cmp_lg_u32 0, s33
; NRBS-NEXT: s_cmovk_i32 s33, 0x1c0
; NRBS-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; NRBS-NEXT: s_alloc_vgpr s0
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: s_and_b32 s0, s0, 1
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; NRBS-NEXT: s_cmp_lg_u32 s0, 0
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: v_mov_b32_e32 v2, s0
; NRBS-NEXT: global_store_b32 v[0:1], v2, off
; NRBS-NEXT: s_alloc_vgpr 0
; NRBS-NEXT: s_endpgm
entry:
; Allocate %n VGPRs (count already uniform in s0); result comes back via SCC.
%scc = call i1 @llvm.amdgcn.s.alloc.vgpr(i32 %n)
; Store 1 on success / 0 on failure so the SCC-derived value is observable.
%sel = select i1 %scc, i32 1, i32 0
store i32 %sel, ptr addrspace(1) %out
ret void
}
; Variable-count case with a divergent (VGPR) operand: checks that the count
; is first made uniform with v_readfirstlane_b32 before feeding s_alloc_vgpr,
; since the instruction takes an SGPR/immediate source.
define amdgpu_cs void @test_alloc_vreg_vgpr(i32 %n, ptr addrspace(1) %out) #0 {
; GISEL-LABEL: test_alloc_vreg_vgpr:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GISEL-NEXT: s_cmp_lg_u32 0, s33
; GISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; GISEL-NEXT: v_readfirstlane_b32 s0, v0
; GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GISEL-NEXT: s_alloc_vgpr s0
; GISEL-NEXT: s_cselect_b32 s0, 1, 0
; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GISEL-NEXT: s_and_b32 s0, s0, 1
; GISEL-NEXT: v_mov_b32_e32 v0, s0
; GISEL-NEXT: global_store_b32 v[4:5], v0, off
; GISEL-NEXT: s_alloc_vgpr 0
; GISEL-NEXT: s_endpgm
;
; DAGISEL-LABEL: test_alloc_vreg_vgpr:
; DAGISEL: ; %bb.0: ; %entry
; DAGISEL-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; DAGISEL-NEXT: s_cmp_lg_u32 0, s33
; DAGISEL-NEXT: s_cmovk_i32 s33, 0x1c0
; DAGISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; DAGISEL-NEXT: v_readfirstlane_b32 s0, v0
; DAGISEL-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; DAGISEL-NEXT: s_alloc_vgpr s0
; DAGISEL-NEXT: s_cselect_b32 s0, -1, 0
; DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; DAGISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; DAGISEL-NEXT: global_store_b32 v[2:3], v0, off
; DAGISEL-NEXT: s_alloc_vgpr 0
; DAGISEL-NEXT: s_endpgm
;
; NRBS-LABEL: test_alloc_vreg_vgpr:
; NRBS: ; %bb.0: ; %entry
; NRBS-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2)
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; NRBS-NEXT: s_cmp_lg_u32 0, s33
; NRBS-NEXT: s_cmovk_i32 s33, 0x1c0
; NRBS-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
; NRBS-NEXT: v_readfirstlane_b32 s0, v0
; NRBS-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; NRBS-NEXT: s_alloc_vgpr s0
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; NRBS-NEXT: s_and_b32 s0, s0, 1
; NRBS-NEXT: s_cmp_lg_u32 s0, 0
; NRBS-NEXT: s_cselect_b32 s0, 1, 0
; NRBS-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; NRBS-NEXT: v_mov_b32_e32 v0, s0
; NRBS-NEXT: global_store_b32 v[4:5], v0, off
; NRBS-NEXT: s_alloc_vgpr 0
; NRBS-NEXT: s_endpgm
entry:
; %n arrives in v0 (divergent); codegen uniformizes it with
; v_readfirstlane_b32 before s_alloc_vgpr (see CHECK lines above).
%scc = call i1 @llvm.amdgcn.s.alloc.vgpr(i32 %n)
; Store 1 on success / 0 on failure so the SCC-derived value is observable.
%sel = select i1 %scc, i32 1, i32 0
store i32 %sel, ptr addrspace(1) %out
ret void
}
; NOTE(review): "amdgpu-dynamic-vgpr-block-size"="16" appears to enable the
; dynamic-VGPR allocation mode that makes s_alloc_vgpr emission possible —
; confirm against AMDGPUUsage docs for this attribute.
attributes #0 = { "amdgpu-dynamic-vgpr-block-size" = "16" }