|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -O0 < %s | FileCheck %s | 
|  |  | 
|  | ; FP is in CSR range, modified. | 
; Leaf function forced to use a frame pointer via "frame-pointer"="all" (#1).
; The checks below pin the FP setup/teardown: s33 is saved to s4, set from
; s32, used as the base for the stack store, then restored.
define hidden fastcc void @callee_has_fp() #1 {
; CHECK-LABEL: callee_has_fp:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s4, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_add_i32 s32, s32, 0x200
; CHECK-NEXT:    v_mov_b32_e32 v0, 1
; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s33
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_mov_b32 s32, s33
; CHECK-NEXT:    s_mov_b32 s33, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
; The volatile store keeps the alloca (and hence a real stack access) alive.
%alloca = alloca i32, addrspace(5)
store volatile i32 1, ptr addrspace(5) %alloca
ret void
}
|  |  | 
|  | ; Has no stack objects, but introduces them due to the CSR spill. We | 
|  | ; see the FP modified in the callee with IPRA. We should not have | 
|  | ; redundant spills of s33 or assert. | 
; Calls @callee_has_fp (which modifies FP) and clobbers CSR v40 via inline
; asm, so a CSR VGPR spill is introduced. The checks verify s33 is saved
; exactly once (into s18) and restored once — no redundant s33 spills.
define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s18, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[16:17]
; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT:    v_writelane_b32 v1, s30, 0
; CHECK-NEXT:    v_writelane_b32 v1, s31, 1
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b32 s32, s33
; CHECK-NEXT:    s_xor_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    s_mov_b32 s33, s18
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[30:31]
bb:
call fastcc void @callee_has_fp()
; Clobbering the callee-saved VGPR forces the v40 spill/reload checked above.
call void asm sideeffect "; clobber csr v40", "~{v40}"()
ret void
}
|  |  | 
; Kernel entry point calling @csr_vgpr_spill_fp_callee. Kernels have no FP
; of their own; the checks cover the -O0 kernel prologue (flat scratch and
; scratch base setup) and the argument shuffling around the call.
define amdgpu_kernel void @kernel_call() {
; CHECK-LABEL: kernel_call:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s17
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; CHECK-NEXT:    v_writelane_b32 v3, s16, 0
; CHECK-NEXT:    s_mov_b32 s13, s15
; CHECK-NEXT:    s_mov_b32 s12, s14
; CHECK-NEXT:    v_readlane_b32 s14, v3, 0
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, csr_vgpr_spill_fp_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, csr_vgpr_spill_fp_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b32 s15, 20
; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
; CHECK-NEXT:    s_mov_b32 s15, 10
; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
; CHECK-NEXT:    ; implicit-def: $sgpr15
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    s_endpgm
bb:
; The tail marker is dropped for kernel callers; this lowers to a plain call.
tail call fastcc void @csr_vgpr_spill_fp_callee()
ret void
}
|  |  | 
|  | ; Same, except with a tail call. | 
; Same scenario as @csr_vgpr_spill_fp_callee but ending in a tail call to
; @callee_has_fp. With no FP of its own, s33 is preserved via a VGPR lane
; (v1) around the tail call rather than an SGPR copy; the epilogue falls
; through into the tail-call target with s_setpc_b64.
define internal fastcc void @csr_vgpr_spill_fp_tailcall_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_tailcall_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[16:17]
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; CHECK-NEXT:    v_writelane_b32 v1, s33, 0
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    v_readlane_b32 s33, v1, 0
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; CHECK-NEXT:    s_xor_saveexec_b64 s[18:19], -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[18:19]
; CHECK-NEXT:    s_setpc_b64 s[16:17]
bb:
; Clobber the CSR VGPR first so the spill exists before the tail call.
call void asm sideeffect "; clobber csr v40", "~{v40}"()
tail call fastcc void @callee_has_fp()
ret void
}
|  |  | 
; Kernel driver for the tail-call variant; same -O0 kernel prologue and
; call-argument shuffling as @kernel_call, targeting
; @csr_vgpr_spill_fp_tailcall_callee instead.
define amdgpu_kernel void @kernel_tailcall() {
; CHECK-LABEL: kernel_tailcall:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s17
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; CHECK-NEXT:    v_writelane_b32 v3, s16, 0
; CHECK-NEXT:    s_mov_b32 s13, s15
; CHECK-NEXT:    s_mov_b32 s12, s14
; CHECK-NEXT:    v_readlane_b32 s14, v3, 0
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, csr_vgpr_spill_fp_tailcall_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, csr_vgpr_spill_fp_tailcall_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b32 s15, 20
; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
; CHECK-NEXT:    s_mov_b32 s15, 10
; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
; CHECK-NEXT:    ; implicit-def: $sgpr15
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    s_endpgm
bb:
tail call fastcc void @csr_vgpr_spill_fp_tailcall_callee()
ret void
}
|  |  | 
; Trivial leaf with "frame-pointer"="all" (#1): even with no stack use it
; still sets up and tears down s33, as the checks verify.
define hidden i32 @tail_call() #1 {
; CHECK-LABEL: tail_call:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s4, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_mov_b32 s33, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
ret i32 0
}
|  |  | 
; Calls FP-using @tail_call; with "frame-pointer"="none" (#0) the caller
; still needs a frame for the return-address spill lanes in v1. Checks pin
; a single s33 save/restore (via s18) around the call.
define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s18, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[16:17]
; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
; CHECK-NEXT:    v_writelane_b32 v1, s30, 0
; CHECK-NEXT:    v_writelane_b32 v1, s31, 1
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, tail_call@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, tail_call@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
; CHECK-NEXT:    s_mov_b32 s32, s33
; CHECK-NEXT:    s_xor_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    s_mov_b32 s33, s18
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
%call = call i32 @tail_call()
ret i32 %call
}
|  |  | 
; One level further up the same call chain; structurally identical to
; @caller_save_vgpr_spill_fp_tail_call but spilling through v2 and saving
; s33 in s19, again with exactly one save/restore pair.
define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s19, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[16:17]
; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
; CHECK-NEXT:    v_writelane_b32 v2, s30, 0
; CHECK-NEXT:    v_writelane_b32 v2, s31, 1
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, caller_save_vgpr_spill_fp_tail_call@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, caller_save_vgpr_spill_fp_tail_call@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    v_readlane_b32 s31, v2, 1
; CHECK-NEXT:    v_readlane_b32 s30, v2, 0
; CHECK-NEXT:    s_mov_b32 s32, s33
; CHECK-NEXT:    s_xor_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    s_mov_b32 s33, s19
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
%call = call i32 @caller_save_vgpr_spill_fp_tail_call()
ret i32 %call
}
|  |  | 
; Top-level kernel driving the caller_save_vgpr_spill_fp chain; the i32
; result is intentionally unused. Same -O0 kernel prologue as the other
; kernel entries above.
define protected amdgpu_kernel void @kernel() {
; CHECK-LABEL: kernel:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s17
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; CHECK-NEXT:    v_writelane_b32 v3, s16, 0
; CHECK-NEXT:    s_mov_b32 s13, s15
; CHECK-NEXT:    s_mov_b32 s12, s14
; CHECK-NEXT:    v_readlane_b32 s14, v3, 0
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, caller_save_vgpr_spill_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, caller_save_vgpr_spill_fp@rel32@hi+12
; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT:    s_mov_b32 s15, 20
; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
; CHECK-NEXT:    s_mov_b32 s15, 10
; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
; CHECK-NEXT:    ; implicit-def: $sgpr15
; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    s_endpgm
entry:
%call = call i32 @caller_save_vgpr_spill_fp()
ret void
}
|  |  | 
|  | attributes #0 = { "frame-pointer"="none" noinline } | 
|  | attributes #1 = { "frame-pointer"="all" noinline } | 
|  |  |