| ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py |
| ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s |
| |
; Natural mapping: every operand already sits in the register bank the instruction expects (SGPR rsrc/soffset, VGPR value/voffset)
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0) |
| ret void |
| } |
| |
| ; Natural mapping |
| define amdgpu_ps void @raw_tbuffer_store_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 |
| ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_XY_OFFEN_exact [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0) |
| ret void |
| } |
| |
| ; Natural mapping |
| define amdgpu_ps void @raw_tbuffer_store_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<3 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 |
| ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_XYZ_OFFEN_exact [[REG_SEQUENCE]], [[COPY7]], [[REG_SEQUENCE1]], [[COPY8]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store 12 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0) |
| ret void |
| } |
| |
| ; Natural mapping |
| define amdgpu_ps void @raw_tbuffer_store_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 |
| ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_XYZW_OFFEN_exact [[REG_SEQUENCE]], [[COPY8]], [[REG_SEQUENCE1]], [[COPY9]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0) |
| ret void |
| } |
| |
; Copy needed for the voffset: the instruction requires it in a VGPR even when the source is an SGPR
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr7 |
| ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY7]], [[REG_SEQUENCE]], [[COPY6]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| ; Waterfall for rsrc |
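; The divergent rsrc must be made uniform: the loop readfirstlanes each 64-bit
; half of the descriptor, compares against the original VGPRs, and reruns the
; store with a narrowed exec mask until every lane has been serviced.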
| define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: successors: %bb.2(0x80000000) |
| ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 |
| ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 |
| ; CHECK: [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo |
| ; CHECK: bb.2: |
| ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec |
| ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[COPY6]], 0, 94, 1, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; CHECK: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc |
| ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec |
| ; CHECK: bb.3: |
| ; CHECK: successors: %bb.4(0x80000000) |
| ; CHECK: $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]] |
| ; CHECK: bb.4: |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 1) |
| ret void |
| } |
| |
| ; Waterfall for rsrc and soffset |
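; As above, but the divergent soffset is also readfirstlaned and its
; lane-equality check is ANDed into the loop condition.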
| define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: successors: %bb.2(0x80000000) |
| ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 |
| ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 |
| ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 |
| ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 |
| ; CHECK: [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo |
| ; CHECK: bb.2: |
| ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec |
| ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 |
| ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec |
| ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec |
| ; CHECK: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[S_AND_B32_]], implicit-def $scc |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; CHECK: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc |
| ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec |
| ; CHECK: bb.3: |
| ; CHECK: successors: %bb.4(0x80000000) |
| ; CHECK: $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]] |
| ; CHECK: bb.4: |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| ; Waterfall for rsrc and soffset, copy for voffset |
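; The uniform SGPR voffset only needs a single COPY to a VGPR, emitted once
; before the waterfall loop.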
| define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__sgpr_voffset__vgpr_soffset(float %val, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__sgpr_voffset__vgpr_soffset |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: successors: %bb.2(0x80000000) |
| ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr5 |
| ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] |
| ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 |
| ; CHECK: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 |
| ; CHECK: [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo |
| ; CHECK: bb.2: |
| ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY8]], implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY9]], implicit $exec |
| ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 |
| ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec |
| ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec |
| ; CHECK: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[S_AND_B32_]], implicit-def $scc |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY7]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; CHECK: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc |
| ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec |
| ; CHECK: bb.3: |
| ; CHECK: successors: %bb.4(0x80000000) |
| ; CHECK: $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]] |
| ; CHECK: bb.4: |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0) |
| ret void |
| } |
| |
| ; Natural mapping + glc |
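; The final i32 immarg is the cachepolicy: bit 0 = glc, bit 1 = slc, and on
; gfx10 bit 2 = dlc.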
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 1, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 1) |
| ret void |
| } |
| |
| ; Natural mapping + slc |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 2, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 2) |
| ret void |
| } |
| |
| ; Natural mapping + glc + slc |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_glc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_glc |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 3, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 3) |
| ret void |
| } |
| |
| ; Natural mapping + dlc |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 4, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 4) |
| ret void |
| } |
| |
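; A constant zero voffset selects the OFFSET form, which has no VGPR offset
; operand at all.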
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFSET_exact [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
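; 4095 is the largest value that fits the 12-bit immediate offset field, so
; the OFFSET form is still usable with no VGPR offset.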
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFSET_exact [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 4095, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 4095, align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 4095, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
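; 4096 does not fit the immediate field, so it is materialized with S_MOV_B32,
; copied to a VGPR, and the OFFEN form is used.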
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 |
| ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY6]], [[REG_SEQUENCE]], [[COPY5]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 4096, align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 4096, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
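; An add of a small constant to the voffset folds straight into the immediate
; offset field.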
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 16, align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %voffset = add i32 %voffset.base, 16 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 4095, align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %voffset = add i32 %voffset.base, 4095 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 |
| ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] |
| ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 4096, align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %voffset = add i32 %voffset.base, 4096 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
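; soffset is a plain SGPR operand with no immediate form, so constant soffsets
; are materialized with S_MOV_B32.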
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 4095, i32 94, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 4096, i32 94, i32 0) |
| ret void |
| } |
| |
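; Adds to the soffset likewise cannot fold into the instruction and remain as
; a separate S_ADD_I32.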
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 |
| ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %soffset = add i32 %soffset.base, 16 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 |
| ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %soffset = add i32 %soffset.base, 4095 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 |
| ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 |
| ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 |
| ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: S_ENDPGM 0 |
| %soffset = add i32 %soffset.base, 4096 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
; An add for the soffset is necessary, and the divergent rsrc requires a waterfall loop. Make sure the add is done outside of the waterfall loop.
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000(float %val, <4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset.base) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: successors: %bb.2(0x80000000) |
| ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 |
| ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc |
| ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 |
| ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 |
| ; CHECK: [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo |
| ; CHECK: bb.2: |
| ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec |
| ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource", align 1, addrspace 4) |
| ; CHECK: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; CHECK: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc |
| ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec |
| ; CHECK: bb.3: |
| ; CHECK: successors: %bb.4(0x80000000) |
| ; CHECK: $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]] |
| ; CHECK: bb.4: |
| ; CHECK: S_ENDPGM 0 |
| %soffset = add i32 %soffset.base, 5000 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
; An add for the voffset is necessary, and the divergent rsrc requires a waterfall loop. Make sure the add is done outside of the waterfall loop.
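; 5000 splits at the 4096 boundary: 4096 is added to the voffset in a VGPR and
; the remaining 904 goes in the immediate offset field.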
| define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000(float %val, <4 x i32> %rsrc, i32 %voffset.base, i32 inreg %soffset) { |
| ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 |
| ; CHECK: bb.1 (%ir-block.0): |
| ; CHECK: successors: %bb.2(0x80000000) |
| ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 |
| ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 |
| ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 |
| ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 |
| ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 |
| ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 |
| ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 |
| ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 |
| ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 |
| ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 |
| ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] |
| ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec |
| ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 |
| ; CHECK: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 |
| ; CHECK: [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo |
| ; CHECK: bb.2: |
| ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) |
| ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY8]], implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec |
| ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec |
| ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1 |
| ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY9]], implicit $exec |
| ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc |
| ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 |
| ; CHECK: TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE3]], [[COPY6]], 904, 94, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "BufferResource" + 5000, align 1, addrspace 4) |
| ; CHECK: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec |
| ; CHECK: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc |
| ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec |
| ; CHECK: bb.3: |
| ; CHECK: successors: %bb.4(0x80000000) |
| ; CHECK: $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]] |
| ; CHECK: bb.4: |
| ; CHECK: S_ENDPGM 0 |
| %voffset = add i32 %voffset.base, 5000 |
| call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0) |
| ret void |
| } |
| |
| declare void @llvm.amdgcn.raw.tbuffer.store.f32(float, <4 x i32>, i32, i32, i32 immarg, i32 immarg) |
| declare void @llvm.amdgcn.raw.tbuffer.store.v2f32(<2 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg) |
| declare void @llvm.amdgcn.raw.tbuffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg) |
| declare void @llvm.amdgcn.raw.tbuffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg) |