; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX942 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90a %s
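; Test preloading of implicit kernel arguments (block counts, workgroup sizes,
; and remainders) into user SGPRs on gfx942 and gfx90a, including cases where
; preloading is not possible and the value must be loaded from the kernarg
; segment instead.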
define amdgpu_kernel void @preload_block_count_x(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_block_count_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB0_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB0_0:
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s4
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dword s8, s[4:5], 0x8
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB0_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB0_0:
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s8
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_unused_arg_block_count_x(ptr addrspace(1) inreg %out, i32 inreg) #0 {
; GFX942-LABEL: preload_unused_arg_block_count_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s6, s[0:1], 0x10
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB1_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB1_0:
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s6
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_unused_arg_block_count_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s10, s[4:5], 0x10
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB1_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB1_0:
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s10
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
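; There are no free user SGPRs left for the implicit block count, so it is
; loaded from the kernarg segment in the kernel body instead.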
define amdgpu_kernel void @no_free_sgprs_block_count_x(ptr addrspace(1) inreg %out, i256 inreg) {
; GFX942-LABEL: no_free_sgprs_block_count_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB2_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB2_0:
; GFX942-NEXT: s_load_dword s0, s[4:5], 0x28
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[8:9]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: no_free_sgprs_block_count_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx4 s[12:15], s[8:9], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB2_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB2_0:
; GFX90a-NEXT: s_load_dword s0, s[8:9], 0x28
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[12:13]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
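; The explicit argument is not marked inreg, so nothing is preloaded and the
; kernel uses the normal entry block.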
define amdgpu_kernel void @no_inreg_block_count_x(ptr addrspace(1) %out) #0 {
; GFX942-LABEL: no_inreg_block_count_x:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s4
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: no_inreg_block_count_x:
; GFX90a: ; %bb.0:
; GFX90a-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s2
; GFX90a-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
; Implicit arg preloading is currently restricted to cases where all explicit
; args are inreg (preloaded).
define amdgpu_kernel void @mixed_inreg_block_count_x(ptr addrspace(1) %out, i32 inreg) #0 {
; GFX942-LABEL: mixed_inreg_block_count_x:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dword s4, s[0:1], 0x10
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s4
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: mixed_inreg_block_count_x:
; GFX90a: ; %bb.0:
; GFX90a-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s2
; GFX90a-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
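; The implicit argument is loaded as i64, which does not match the 32-bit
; block count field, so it is not preloaded.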
define amdgpu_kernel void @incorrect_type_i64_block_count_x(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: incorrect_type_i64_block_count_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB5_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB5_0:
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX942-NEXT: v_mov_b32_e32 v2, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX942-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: incorrect_type_i64_block_count_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB5_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB5_0:
; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i64, ptr addrspace(4) %imp_arg_ptr
store i64 %load, ptr addrspace(1) %out
ret void
}
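; Likewise, an i16 load of the implicit argument is not preloaded.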
define amdgpu_kernel void @incorrect_type_i16_block_count_x(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: incorrect_type_i16_block_count_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB6_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB6_0:
; GFX942-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_short v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: incorrect_type_i16_block_count_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB6_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB6_0:
; GFX90a-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_short v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i16, ptr addrspace(4) %imp_arg_ptr
store i16 %load, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_block_count_y(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_block_count_y:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB7_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB7_0:
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s5
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_y:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB7_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB7_0:
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s9
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 4
%load = load i32, ptr addrspace(4) %gep
store i32 %load, ptr addrspace(1) %out
ret void
}
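; A load at offset 2 into the implicit argument area does not line up with any
; preloadable field, so it is loaded from the kernarg segment at runtime.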
define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: random_incorrect_offset:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB8_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB8_0:
; GFX942-NEXT: s_mov_b32 s4, 8
; GFX942-NEXT: s_load_dword s0, s[0:1], s4 offset:0x2
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: random_incorrect_offset:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB8_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB8_0:
; GFX90a-NEXT: s_mov_b32 s0, 8
; GFX90a-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
%load = load i32, ptr addrspace(4) %gep
store i32 %load, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_block_count_z(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_block_count_z:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s6, s[0:1], 0x10
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB9_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB9_0:
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s6
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_z:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s10, s[4:5], 0x10
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB9_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB9_0:
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s10
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8
%load = load i32, ptr addrspace(4) %gep
store i32 %load, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_block_count_x_imparg_align_ptr_i8(ptr addrspace(1) inreg %out, i8 inreg %val) #0 {
; GFX942-LABEL: preload_block_count_x_imparg_align_ptr_i8:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s6, s[0:1], 0x10
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB10_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB10_0:
; GFX942-NEXT: s_and_b32 s0, s4, 0xff
; GFX942-NEXT: s_add_i32 s0, s6, s0
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_x_imparg_align_ptr_i8:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s10, s[4:5], 0x10
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB10_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB10_0:
; GFX90a-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-NEXT: s_add_i32 s0, s10, s0
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
%ext = zext i8 %val to i32
%add = add i32 %load, %ext
store i32 %add, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_block_count_xyz(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_block_count_xyz:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s6, s[0:1], 0x10
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB11_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB11_0:
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b32_e32 v0, s4
; GFX942-NEXT: v_mov_b32_e32 v1, s5
; GFX942-NEXT: v_mov_b32_e32 v2, s6
; GFX942-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_xyz:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s10, s[4:5], 0x10
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB11_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB11_0:
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s8
; GFX90a-NEXT: v_mov_b32_e32 v1, s9
; GFX90a-NEXT: v_mov_b32_e32 v2, s10
; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 0
%load_x = load i32, ptr addrspace(4) %gep_x
%gep_y = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 4
%load_y = load i32, ptr addrspace(4) %gep_y
%gep_z = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8
%load_z = load i32, ptr addrspace(4) %gep_z
%ins.0 = insertelement <3 x i32> poison, i32 %load_x, i32 0
%ins.1 = insertelement <3 x i32> %ins.0, i32 %load_y, i32 1
%ins.2 = insertelement <3 x i32> %ins.1, i32 %load_z, i32 2
store <3 x i32> %ins.2, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_workgroup_size_x(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_workgroup_size_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB12_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB12_0:
; GFX942-NEXT: s_and_b32 s0, s7, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_workgroup_size_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB12_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB12_0:
; GFX90a-NEXT: s_and_b32 s0, s11, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 12
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_workgroup_size_y(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_workgroup_size_y:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB13_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB13_0:
; GFX942-NEXT: s_lshr_b32 s0, s7, 16
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_workgroup_size_y:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB13_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB13_0:
; GFX90a-NEXT: s_lshr_b32 s0, s11, 16
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 14
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_workgroup_size_z(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_workgroup_size_z:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s8, s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB14_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB14_0:
; GFX942-NEXT: s_and_b32 s0, s8, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_workgroup_size_z:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s12, s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB14_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB14_0:
; GFX90a-NEXT: s_and_b32 s0, s12, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 16
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_workgroup_size_xyz(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_workgroup_size_xyz:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s8, s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB15_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB15_0:
; GFX942-NEXT: s_lshr_b32 s0, s7, 16
; GFX942-NEXT: s_and_b32 s1, s7, 0xffff
; GFX942-NEXT: s_and_b32 s4, s8, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b32_e32 v0, s1
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: v_mov_b32_e32 v2, s4
; GFX942-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_workgroup_size_xyz:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s12, s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB15_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB15_0:
; GFX90a-NEXT: s_lshr_b32 s0, s11, 16
; GFX90a-NEXT: s_and_b32 s1, s11, 0xffff
; GFX90a-NEXT: s_and_b32 s2, s12, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s1
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: v_mov_b32_e32 v2, s2
; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 12
%load_x = load i16, ptr addrspace(4) %gep_x
%conv_x = zext i16 %load_x to i32
%gep_y = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 14
%load_y = load i16, ptr addrspace(4) %gep_y
%conv_y = zext i16 %load_y to i32
%gep_z = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 16
%load_z = load i16, ptr addrspace(4) %gep_z
%conv_z = zext i16 %load_z to i32
%ins.0 = insertelement <3 x i32> poison, i32 %conv_x, i32 0
%ins.1 = insertelement <3 x i32> %ins.0, i32 %conv_y, i32 1
%ins.2 = insertelement <3 x i32> %ins.1, i32 %conv_z, i32 2
store <3 x i32> %ins.2, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_remainder_x(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_remainder_x:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s8, s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB16_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB16_0:
; GFX942-NEXT: s_lshr_b32 s0, s8, 16
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_remainder_x:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dword s12, s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB16_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB16_0:
; GFX90a-NEXT: s_lshr_b32 s0, s12, 16
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 18
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preloadremainder_y(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preloadremainder_y:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB17_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB17_0:
; GFX942-NEXT: s_and_b32 s0, s9, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preloadremainder_y:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB17_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB17_0:
; GFX90a-NEXT: s_and_b32 s0, s13, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 20
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preloadremainder_z(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preloadremainder_z:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB18_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB18_0:
; GFX942-NEXT: s_lshr_b32 s0, s9, 16
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preloadremainder_z:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB18_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB18_0:
; GFX90a-NEXT: s_lshr_b32 s0, s13, 16
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preloadremainder_xyz(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preloadremainder_xyz:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB19_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB19_0:
; GFX942-NEXT: s_lshr_b32 s0, s9, 16
; GFX942-NEXT: s_lshr_b32 s1, s8, 16
; GFX942-NEXT: s_and_b32 s4, s9, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b32_e32 v0, s1
; GFX942-NEXT: v_mov_b32_e32 v1, s4
; GFX942-NEXT: v_mov_b32_e32 v2, s0
; GFX942-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preloadremainder_xyz:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB19_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB19_0:
; GFX90a-NEXT: s_lshr_b32 s0, s13, 16
; GFX90a-NEXT: s_lshr_b32 s1, s12, 16
; GFX90a-NEXT: s_and_b32 s2, s13, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s1
; GFX90a-NEXT: v_mov_b32_e32 v1, s2
; GFX90a-NEXT: v_mov_b32_e32 v2, s0
; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 18
%load_x = load i16, ptr addrspace(4) %gep_x
%conv_x = zext i16 %load_x to i32
%gep_y = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 20
%load_y = load i16, ptr addrspace(4) %gep_y
%conv_y = zext i16 %load_y to i32
%gep_z = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22
%load_z = load i16, ptr addrspace(4) %gep_z
%conv_z = zext i16 %load_z to i32
%ins.0 = insertelement <3 x i32> poison, i32 %conv_x, i32 0
%ins.1 = insertelement <3 x i32> %ins.0, i32 %conv_y, i32 1
%ins.2 = insertelement <3 x i32> %ins.1, i32 %conv_z, i32 2
store <3 x i32> %ins.2, ptr addrspace(1) %out
ret void
}
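; gfx942 has enough user SGPRs to preload through the remainder fields, but
; gfx90a does not, so gfx90a loads the value in the kernel body.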
define amdgpu_kernel void @no_free_sgprs_preloadremainder_z(ptr addrspace(1) inreg %out) {
; GFX942-LABEL: no_free_sgprs_preloadremainder_z:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB20_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB20_0:
; GFX942-NEXT: s_lshr_b32 s0, s15, 16
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s0
; GFX942-NEXT: global_store_dword v0, v1, s[8:9]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: no_free_sgprs_preloadremainder_z:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[8:9], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB20_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB20_0:
; GFX90a-NEXT: s_load_dword s0, s[8:9], 0x1c
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_lshr_b32 s0, s0, 16
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[12:13]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22
%load = load i16, ptr addrspace(4) %gep
%conv = zext i16 %load to i32
store i32 %conv, ptr addrspace(1) %out
ret void
}
; Check that instruction selection and earlier passes agree on preloaded SGPR
; accounting when the maximum number of user SGPRs is used for preloading.
define amdgpu_kernel void @preload_block_max_user_sgprs(ptr addrspace(1) inreg %out, i192 inreg %t0, i32 inreg %t1) #0 {
; GFX942-LABEL: preload_block_max_user_sgprs:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x8
; GFX942-NEXT: s_load_dword s12, s[0:1], 0x28
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB21_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB21_0:
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, s12
; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_max_user_sgprs:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x18
; GFX90a-NEXT: s_load_dword s14, s[4:5], 0x20
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB21_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB21_0:
; GFX90a-NEXT: s_load_dword s0, s[4:5], 0x28
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
; GFX90a-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
store i32 %load, ptr addrspace(1) %out
ret void
}
define amdgpu_kernel void @preload_block_count_z_workgroup_size_z_remainder_z(ptr addrspace(1) inreg %out) #0 {
; GFX942-LABEL: preload_block_count_z_workgroup_size_z_remainder_z:
; GFX942: ; %bb.1:
; GFX942-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
; GFX942-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x18
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_branch .LBB22_0
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB22_0:
; GFX942-NEXT: s_lshr_b32 s0, s9, 16
; GFX942-NEXT: s_and_b32 s1, s8, 0xffff
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b32_e32 v0, s6
; GFX942-NEXT: v_mov_b32_e32 v1, s1
; GFX942-NEXT: v_mov_b32_e32 v2, s0
; GFX942-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3]
; GFX942-NEXT: s_endpgm
;
; GFX90a-LABEL: preload_block_count_z_workgroup_size_z_remainder_z:
; GFX90a: ; %bb.1:
; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x18
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB22_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB22_0:
; GFX90a-NEXT: s_lshr_b32 s0, s13, 16
; GFX90a-NEXT: s_and_b32 s1, s12, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-NEXT: v_mov_b32_e32 v1, s1
; GFX90a-NEXT: v_mov_b32_e32 v2, s0
; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep0 = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8
%gep1 = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 16
%gep2 = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22
%load0 = load i32, ptr addrspace(4) %gep0
%load1 = load i16, ptr addrspace(4) %gep1
%load2 = load i16, ptr addrspace(4) %gep2
%conv1 = zext i16 %load1 to i32
%conv2 = zext i16 %load2 to i32
%ins.0 = insertelement <3 x i32> poison, i32 %load0, i32 0
%ins.1 = insertelement <3 x i32> %ins.0, i32 %conv1, i32 1
%ins.2 = insertelement <3 x i32> %ins.1, i32 %conv2, i32 2
store <3 x i32> %ins.2, ptr addrspace(1) %out
ret void
}
attributes #0 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }