; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -amdgpu-fixed-function-abi=1 -stop-after=irtranslator -o - %s | FileCheck --check-prefix=FIXED %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -amdgpu-fixed-function-abi=0 -stop-after=irtranslator -o - %s | FileCheck --check-prefix=VARABI %s

; Make sure arg1 is not allocated in v31, which is reserved for
; workitem IDs with -amdgpu-fixed-function-abi.
define void @void_a31i32_i32([31 x i32] %arg0, i32 %arg1) {
; FIXED-LABEL: name: void_a31i32_i32
; FIXED: bb.1 (%ir-block.0):
; FIXED: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
; FIXED: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; FIXED: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; FIXED: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; FIXED: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; FIXED: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; FIXED: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; FIXED: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; FIXED: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; FIXED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; FIXED: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; FIXED: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; FIXED: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; FIXED: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; FIXED: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; FIXED: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; FIXED: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; FIXED: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; FIXED: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; FIXED: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; FIXED: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; FIXED: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; FIXED: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; FIXED: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; FIXED: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; FIXED: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; FIXED: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; FIXED: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; FIXED: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; FIXED: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; FIXED: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; FIXED: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; FIXED: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; FIXED: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.0, align 16, addrspace 5)
; FIXED: [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; FIXED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; FIXED: G_STORE [[LOAD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; FIXED: [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
; FIXED: S_SETPC_B64_return [[COPY32]]
; VARABI-LABEL: name: void_a31i32_i32
; VARABI: bb.1 (%ir-block.0):
; VARABI: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; VARABI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; VARABI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; VARABI: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; VARABI: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; VARABI: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; VARABI: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; VARABI: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; VARABI: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; VARABI: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; VARABI: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; VARABI: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; VARABI: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; VARABI: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; VARABI: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; VARABI: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; VARABI: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; VARABI: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; VARABI: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; VARABI: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; VARABI: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; VARABI: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; VARABI: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; VARABI: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; VARABI: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; VARABI: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; VARABI: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; VARABI: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; VARABI: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; VARABI: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; VARABI: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; VARABI: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; VARABI: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; VARABI: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; VARABI: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; VARABI: G_STORE [[COPY31]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; VARABI: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; VARABI: S_SETPC_B64_return [[COPY33]]
  ; Store %arg1 so its incoming location is observable: with the fixed ABI it
  ; must come from the stack (v31 is reserved for workitem IDs); with the
  ; variable ABI it may come from v31.
  store i32 %arg1, i32 addrspace(1)* undef
  ret void
}