| ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py |
| ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=regbankselect -regbankselect-fast -o - %s | FileCheck -check-prefix=FAST %s |
| ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=regbankselect -regbankselect-greedy -o - %s | FileCheck -check-prefix=GREEDY %s |
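; The two RUN lines exercise the fast and greedy register-bank mapping
; heuristics; for the loads below, both modes happen to choose identical bank
; assignments, so the FAST and GREEDY check bodies match.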
| |
; Natural mapping: the rsrc arrives uniform (SGPR) and the vaddr divergent
; (VGPR), matching the banks the intrinsic expects, so regbankselect inserts
; no copies or loops.
| define amdgpu_ps void @load_1d_vgpr_vaddr__sgpr_srsrc(<8 x i32> inreg %rsrc, i32 %s) { |
| ; FAST-LABEL: name: load_1d_vgpr_vaddr__sgpr_srsrc |
| ; FAST: bb.1 (%ir-block.0): |
| ; FAST: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 |
| ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr3 |
| ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr4 |
| ; FAST: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr5 |
| ; FAST: [[COPY4:%[0-9]+]]:sgpr(s32) = COPY $sgpr6 |
| ; FAST: [[COPY5:%[0-9]+]]:sgpr(s32) = COPY $sgpr7 |
| ; FAST: [[COPY6:%[0-9]+]]:sgpr(s32) = COPY $sgpr8 |
| ; FAST: [[COPY7:%[0-9]+]]:sgpr(s32) = COPY $sgpr9 |
| ; FAST: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; FAST: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; FAST: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; FAST: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; FAST: [[COPY9:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; FAST: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY9]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; FAST: S_ENDPGM 0 |
| ; GREEDY-LABEL: name: load_1d_vgpr_vaddr__sgpr_srsrc |
| ; GREEDY: bb.1 (%ir-block.0): |
| ; GREEDY: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 |
| ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr3 |
| ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr4 |
| ; GREEDY: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr5 |
| ; GREEDY: [[COPY4:%[0-9]+]]:sgpr(s32) = COPY $sgpr6 |
| ; GREEDY: [[COPY5:%[0-9]+]]:sgpr(s32) = COPY $sgpr7 |
| ; GREEDY: [[COPY6:%[0-9]+]]:sgpr(s32) = COPY $sgpr8 |
| ; GREEDY: [[COPY7:%[0-9]+]]:sgpr(s32) = COPY $sgpr9 |
| ; GREEDY: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; GREEDY: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; GREEDY: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; GREEDY: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; GREEDY: [[COPY9:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; GREEDY: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY9]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; GREEDY: S_ENDPGM 0 |
| %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) |
| store <4 x float> %v, <4 x float> addrspace(1)* undef |
| ret void |
| } |
| |
; Copy needed for the vaddr: it arrives uniform (SGPR inreg), but the image
; load requires a VGPR address operand.
| define amdgpu_ps void @load_1d_sgpr_vaddr__sgpr_srsrc(<8 x i32> inreg %rsrc, i32 inreg %s) { |
| ; FAST-LABEL: name: load_1d_sgpr_vaddr__sgpr_srsrc |
| ; FAST: bb.1 (%ir-block.0): |
| ; FAST: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10 |
| ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr3 |
| ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr4 |
| ; FAST: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr5 |
| ; FAST: [[COPY4:%[0-9]+]]:sgpr(s32) = COPY $sgpr6 |
| ; FAST: [[COPY5:%[0-9]+]]:sgpr(s32) = COPY $sgpr7 |
| ; FAST: [[COPY6:%[0-9]+]]:sgpr(s32) = COPY $sgpr8 |
| ; FAST: [[COPY7:%[0-9]+]]:sgpr(s32) = COPY $sgpr9 |
| ; FAST: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10 |
| ; FAST: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; FAST: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32) |
| ; FAST: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; FAST: [[COPY10:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; FAST: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY10]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; FAST: S_ENDPGM 0 |
| ; GREEDY-LABEL: name: load_1d_sgpr_vaddr__sgpr_srsrc |
| ; GREEDY: bb.1 (%ir-block.0): |
| ; GREEDY: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10 |
| ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr3 |
| ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr4 |
| ; GREEDY: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr5 |
| ; GREEDY: [[COPY4:%[0-9]+]]:sgpr(s32) = COPY $sgpr6 |
| ; GREEDY: [[COPY5:%[0-9]+]]:sgpr(s32) = COPY $sgpr7 |
| ; GREEDY: [[COPY6:%[0-9]+]]:sgpr(s32) = COPY $sgpr8 |
| ; GREEDY: [[COPY7:%[0-9]+]]:sgpr(s32) = COPY $sgpr9 |
| ; GREEDY: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; GREEDY: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10 |
| ; GREEDY: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; GREEDY: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32) |
| ; GREEDY: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; GREEDY: [[COPY10:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; GREEDY: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY10]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; GREEDY: S_ENDPGM 0 |
| %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) |
| store <4 x float> %v, <4 x float> addrspace(1)* undef |
| ret void |
| } |
| |
; Waterfall loop needed for the rsrc: it arrives divergent (VGPR), but the
; descriptor operand must be uniform (SGPR), so the load is wrapped in a loop
; over the distinct per-lane descriptor values.
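; Each iteration of bb.2 readfirstlanes the eight dwords of the descriptor
; (as four 64-bit pieces), compares the uniform result against every lane's
; value, ANDs the compares into a mask of matching lanes, executes the load
; with exec restricted to that mask, and XORs those lanes out of exec until
; exec is empty.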
| define amdgpu_ps void @load_1d_vgpr_vaddr__vgpr_srsrc(<8 x i32> %rsrc, i32 %s) { |
| ; FAST-LABEL: name: load_1d_vgpr_vaddr__vgpr_srsrc |
| ; FAST: bb.1 (%ir-block.0): |
| ; FAST: successors: %bb.2(0x80000000) |
| ; FAST: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8 |
| ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 |
| ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 |
| ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 |
| ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY $vgpr4 |
| ; FAST: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY $vgpr5 |
| ; FAST: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY $vgpr6 |
| ; FAST: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY $vgpr7 |
| ; FAST: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; FAST: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr8 |
| ; FAST: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; FAST: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF |
| ; FAST: [[UV:%[0-9]+]]:vreg_64(s64), [[UV1:%[0-9]+]]:vreg_64(s64), [[UV2:%[0-9]+]]:vreg_64(s64), [[UV3:%[0-9]+]]:vreg_64(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<8 x s32>) |
| ; FAST: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec |
| ; FAST: bb.2: |
| ; FAST: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; FAST: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[DEF1]], %bb.1, %17, %bb.2 |
| ; FAST: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub1(s64), implicit $exec |
| ; FAST: [[MV:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV]](s64), [[UV]](s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub1(s64), implicit $exec |
| ; FAST: [[MV1:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; FAST: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub1(s64), implicit $exec |
| ; FAST: [[MV2:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV2]](s64), [[UV2]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_2]], [[S_AND_B64_]], implicit-def $scc |
| ; FAST: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub1(s64), implicit $exec |
| ; FAST: [[MV3:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV3]](s64), [[UV3]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_2:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_3]], [[S_AND_B64_1]], implicit-def $scc |
| ; FAST: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32), [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32), [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; FAST: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_2]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; FAST: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VECTOR1]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; FAST: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc |
| ; FAST: SI_WATERFALL_LOOP %bb.2, implicit $exec |
| ; FAST: bb.3: |
| ; FAST: successors: %bb.4(0x80000000) |
| ; FAST: $exec = S_MOV_B64_term [[S_MOV_B64_term]] |
| ; FAST: bb.4: |
| ; FAST: [[COPY9:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; FAST: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY9]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; FAST: S_ENDPGM 0 |
| ; GREEDY-LABEL: name: load_1d_vgpr_vaddr__vgpr_srsrc |
| ; GREEDY: bb.1 (%ir-block.0): |
| ; GREEDY: successors: %bb.2(0x80000000) |
| ; GREEDY: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8 |
| ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 |
| ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 |
| ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 |
| ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY $vgpr4 |
| ; GREEDY: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY $vgpr5 |
| ; GREEDY: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY $vgpr6 |
| ; GREEDY: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY $vgpr7 |
| ; GREEDY: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; GREEDY: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr8 |
| ; GREEDY: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; GREEDY: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF |
| ; GREEDY: [[UV:%[0-9]+]]:vreg_64(s64), [[UV1:%[0-9]+]]:vreg_64(s64), [[UV2:%[0-9]+]]:vreg_64(s64), [[UV3:%[0-9]+]]:vreg_64(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<8 x s32>) |
| ; GREEDY: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec |
| ; GREEDY: bb.2: |
| ; GREEDY: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; GREEDY: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[DEF1]], %bb.1, %17, %bb.2 |
| ; GREEDY: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV]](s64), [[UV]](s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV1:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; GREEDY: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV2:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV2]](s64), [[UV2]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_2]], [[S_AND_B64_]], implicit-def $scc |
| ; GREEDY: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV3:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV3]](s64), [[UV3]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_2:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_3]], [[S_AND_B64_1]], implicit-def $scc |
| ; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32), [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32), [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; GREEDY: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_2]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; GREEDY: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VECTOR1]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; GREEDY: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc |
| ; GREEDY: SI_WATERFALL_LOOP %bb.2, implicit $exec |
| ; GREEDY: bb.3: |
| ; GREEDY: successors: %bb.4(0x80000000) |
| ; GREEDY: $exec = S_MOV_B64_term [[S_MOV_B64_term]] |
| ; GREEDY: bb.4: |
| ; GREEDY: [[COPY9:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; GREEDY: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY9]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; GREEDY: S_ENDPGM 0 |
| %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) |
| store <4 x float> %v, <4 x float> addrspace(1)* undef |
| ret void |
| } |
| |
; Waterfall loop needed for the divergent rsrc, plus an SGPR-to-VGPR copy for
; the uniform vaddr.
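; Note the vaddr copy is emitted once in the entry block; only the descriptor
; handling lives inside the loop.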
| define amdgpu_ps void @load_1d_sgpr_vaddr__vgpr_srsrc(<8 x i32> %rsrc, i32 inreg %s) { |
| ; FAST-LABEL: name: load_1d_sgpr_vaddr__vgpr_srsrc |
| ; FAST: bb.1 (%ir-block.0): |
| ; FAST: successors: %bb.2(0x80000000) |
| ; FAST: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 |
| ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 |
| ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 |
| ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 |
| ; FAST: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY $vgpr4 |
| ; FAST: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY $vgpr5 |
| ; FAST: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY $vgpr6 |
| ; FAST: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY $vgpr7 |
| ; FAST: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; FAST: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; FAST: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32) |
| ; FAST: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF |
| ; FAST: [[UV:%[0-9]+]]:vreg_64(s64), [[UV1:%[0-9]+]]:vreg_64(s64), [[UV2:%[0-9]+]]:vreg_64(s64), [[UV3:%[0-9]+]]:vreg_64(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<8 x s32>) |
| ; FAST: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec |
| ; FAST: bb.2: |
| ; FAST: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; FAST: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[DEF1]], %bb.1, %18, %bb.2 |
| ; FAST: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub1(s64), implicit $exec |
| ; FAST: [[MV:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV]](s64), [[UV]](s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub1(s64), implicit $exec |
| ; FAST: [[MV1:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; FAST: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub1(s64), implicit $exec |
| ; FAST: [[MV2:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV2]](s64), [[UV2]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_2]], [[S_AND_B64_]], implicit-def $scc |
| ; FAST: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub0(s64), implicit $exec |
| ; FAST: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub1(s64), implicit $exec |
| ; FAST: [[MV3:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; FAST: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV3]](s64), [[UV3]](s64), implicit $exec |
| ; FAST: [[S_AND_B64_2:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_3]], [[S_AND_B64_1]], implicit-def $scc |
| ; FAST: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32), [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32), [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; FAST: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_2]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; FAST: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY9]](s32), [[BUILD_VECTOR1]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; FAST: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc |
| ; FAST: SI_WATERFALL_LOOP %bb.2, implicit $exec |
| ; FAST: bb.3: |
| ; FAST: successors: %bb.4(0x80000000) |
| ; FAST: $exec = S_MOV_B64_term [[S_MOV_B64_term]] |
| ; FAST: bb.4: |
| ; FAST: [[COPY10:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; FAST: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY10]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; FAST: S_ENDPGM 0 |
| ; GREEDY-LABEL: name: load_1d_sgpr_vaddr__vgpr_srsrc |
| ; GREEDY: bb.1 (%ir-block.0): |
| ; GREEDY: successors: %bb.2(0x80000000) |
| ; GREEDY: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 |
| ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 |
| ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 |
| ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 |
| ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 |
| ; GREEDY: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY $vgpr4 |
| ; GREEDY: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY $vgpr5 |
| ; GREEDY: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY $vgpr6 |
| ; GREEDY: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY $vgpr7 |
| ; GREEDY: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) |
| ; GREEDY: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 |
| ; GREEDY: [[DEF:%[0-9]+]]:sgpr(p1) = G_IMPLICIT_DEF |
| ; GREEDY: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32) |
| ; GREEDY: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF |
| ; GREEDY: [[UV:%[0-9]+]]:vreg_64(s64), [[UV1:%[0-9]+]]:vreg_64(s64), [[UV2:%[0-9]+]]:vreg_64(s64), [[UV3:%[0-9]+]]:vreg_64(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<8 x s32>) |
| ; GREEDY: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec |
| ; GREEDY: bb.2: |
| ; GREEDY: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; GREEDY: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[DEF1]], %bb.1, %18, %bb.2 |
| ; GREEDY: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV]](s64), [[UV]](s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV1]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV1:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; GREEDY: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV2]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV2:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV2]](s64), [[UV2]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_2]], [[S_AND_B64_]], implicit-def $scc |
| ; GREEDY: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub0(s64), implicit $exec |
| ; GREEDY: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[UV3]].sub1(s64), implicit $exec |
| ; GREEDY: [[MV3:%[0-9]+]]:sreg_64_xexec(s64) = G_MERGE_VALUES [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; GREEDY: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV3]](s64), [[UV3]](s64), implicit $exec |
| ; GREEDY: [[S_AND_B64_2:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_3]], [[S_AND_B64_1]], implicit-def $scc |
| ; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<8 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32), [[V_READFIRSTLANE_B32_4]](s32), [[V_READFIRSTLANE_B32_5]](s32), [[V_READFIRSTLANE_B32_6]](s32), [[V_READFIRSTLANE_B32_7]](s32) |
| ; GREEDY: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_2]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; GREEDY: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY9]](s32), [[BUILD_VECTOR1]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>) from custom "ImageResource") |
| ; GREEDY: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc |
| ; GREEDY: SI_WATERFALL_LOOP %bb.2, implicit $exec |
| ; GREEDY: bb.3: |
| ; GREEDY: successors: %bb.4(0x80000000) |
| ; GREEDY: $exec = S_MOV_B64_term [[S_MOV_B64_term]] |
| ; GREEDY: bb.4: |
| ; GREEDY: [[COPY10:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1) |
| ; GREEDY: G_STORE [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>), [[COPY10]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1) |
| ; GREEDY: S_ENDPGM 0 |
| %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) |
| store <4 x float> %v, <4 x float> addrspace(1)* undef |
| ret void |
| } |
| |
| declare <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 immarg, i32, <8 x i32>, i32 immarg, i32 immarg) #0 |
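; Operand order, per the AMDGPU image intrinsic convention: dmask, 1-D
; coordinate, 8-dword resource descriptor, texfailctrl, cachepolicy. The
; dmask of 15 in the calls above selects all four channels, hence the
; <4 x float> result.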
| |
| attributes #0 = { nounwind readonly } |