blob: e08ce09fe605ada2ecac71b0ec0d2be1a33aa139 [file] [log] [blame] [edit]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: opt -passes=instcombine < %s | llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s
; RUN: opt -passes=instcombine < %s | llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
; RUN: opt -passes=instcombine < %s | llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s
; RUN: opt -passes=instcombine < %s | llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
; The address calculation can be simplified and folded into the load because the known bits of the mbcnt result rule out overflow.
define amdgpu_ps <2 x float> @global_load_scale_add_foldable_knownbits(ptr addrspace(1) inreg %sbase) {
; GFX12-LABEL: global_load_scale_add_foldable_knownbits:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:128
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: ; return to shader part epilog
;
; GFX1250-LABEL: global_load_scale_add_foldable_knownbits:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:128 scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: ; return to shader part epilog
  ; mbcnt.lo(-1, 0) counts lower active lanes, so its result is bounded by the
  ; wave width; the known-zero high bits prove that (%v << 1) + 32 cannot wrap
  ; in i32, letting the backend fold the whole computation into the load's
  ; immediate offset (the float GEP contributes the extra x4 scale: 32*4 = 128).
  %v = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %mul = shl i32 %v, 1
  %add = add i32 %mul, 32
  ; zext of the provably-non-wrapping i32 sum is safe to commute with the add.
  %zext.offset = zext i32 %add to i64
  %gep = getelementptr inbounds float, ptr addrspace(1) %sbase, i64 %zext.offset
  %load = load <2 x float>, ptr addrspace(1) %gep
  ret <2 x float> %load
}
; The nsw/nuw flags enable folding of the address addition into the load's immediate offset.
define amdgpu_ps <2 x float> @global_load_scale_add_foldable_nowrap(ptr addrspace(1) inreg %sbase, i32 %v) {
; GFX12-LABEL: global_load_scale_add_foldable_nowrap:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:128
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: ; return to shader part epilog
;
; GFX1250-LABEL: global_load_scale_add_foldable_nowrap:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:128 scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: ; return to shader part epilog
  ; The explicit nuw/nsw flags guarantee (%v << 3) + 128 does not wrap in i32,
  ; so the +128 can be pulled through the zext and become the load's immediate
  ; offset (and, on gfx1250, the <<3 becomes the scale_offset modifier).
  %mul = shl nsw nuw i32 %v, 3
  %add = add nsw nuw i32 %mul, 128
  %zext.offset = zext i32 %add to i64
  %gep = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %load = load <2 x float>, ptr addrspace(1) %gep
  ret <2 x float> %load
}
; The address calculation cannot be folded because of possible overflow during the addition.
define amdgpu_ps <2 x float> @global_load_scale_add_unfoldable(ptr addrspace(1) inreg %sbase, i32 %v) {
; GFX12-LABEL: global_load_scale_add_unfoldable:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_lshl_add_u32 v0, v0, 3, 0x80
; GFX12-NEXT: global_load_b64 v[0:1], v0, s[2:3]
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: ; return to shader part epilog
;
; GFX1250-LABEL: global_load_scale_add_unfoldable:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 3, 0x80
; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: ; return to shader part epilog
  ; Negative test: no nuw/nsw here and %v is unconstrained, so (%v << 3) + 128
  ; may wrap in i32. The wrap must be preserved before the zext, so the add has
  ; to stay a 32-bit ALU op (v_lshl_add_u32) and cannot become an immediate
  ; offset or scale_offset on the load.
  %mul = shl i32 %v, 3
  %add = add i32 %mul, 128
  %zext.offset = zext i32 %add to i64
  %gep = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
  %load = load <2 x float>, ptr addrspace(1) %gep
  ret <2 x float> %load
}
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX12-GISEL: {{.*}}
; GFX12-SDAG: {{.*}}
; GFX1250-GISEL: {{.*}}
; GFX1250-SDAG: {{.*}}