| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,SDAG %s |
| ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GISEL %s |
| |
| define amdgpu_ps float @s_load_b32_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b32_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; The index is zero-extended (so provably non-negative) and the float stride |
| ; matches the 4-byte b32 access size, so both selectors fold the index into |
| ; the SMEM soffset operand with the 'scale_offset' modifier. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load float, ptr addrspace(4) %arrayidx, align 4 |
| ret float %ret |
| } |
| |
| ; 'i32 %idx' is a signed index while SMRD soffset is unsigned, thus it is not selected. |
| |
| define amdgpu_ps float @s_load_b32_idx32(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; SDAG-LABEL: s_load_b32_idx32: |
| ; SDAG: ; %bb.0: ; %entry |
| ; SDAG-NEXT: s_ashr_i32 s3, s2, 31 |
| ; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) |
| ; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 |
| ; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] |
| ; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0 |
| ; SDAG-NEXT: s_wait_kmcnt 0x0 |
| ; SDAG-NEXT: v_mov_b32_e32 v0, s0 |
| ; SDAG-NEXT: ; return to shader part epilog |
| ; |
| ; GISEL-LABEL: s_load_b32_idx32: |
| ; GISEL: ; %bb.0: ; %entry |
| ; GISEL-NEXT: s_ashr_i32 s3, s2, 31 |
| ; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) |
| ; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 |
| ; GISEL-NEXT: s_add_co_u32 s0, s0, s2 |
| ; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3 |
| ; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0 |
| ; GISEL-NEXT: s_wait_kmcnt 0x0 |
| ; GISEL-NEXT: v_mov_b32_e32 v0, s0 |
| ; GISEL-NEXT: ; return to shader part epilog |
| ; No zext here: the raw i32 GEP index is sign-extended (s_ashr_i32 of bit 31), |
| ; so scale_offset cannot be used and the 64-bit address is computed with SALU |
| ; arithmetic on both selectors (s_add_nc_u64 vs. add/addc pair). |
| entry: |
| %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i32 %idx |
| %ret = load float, ptr addrspace(4) %arrayidx, align 4 |
| ret float %ret |
| } |
| |
| define amdgpu_ps float @s_load_b32_idxprom_wrong_stride(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; SDAG-LABEL: s_load_b32_idxprom_wrong_stride: |
| ; SDAG: ; %bb.0: ; %entry |
| ; SDAG-NEXT: s_mov_b32 s3, 0 |
| ; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) |
| ; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 3 |
| ; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] |
| ; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0 |
| ; SDAG-NEXT: s_wait_kmcnt 0x0 |
| ; SDAG-NEXT: v_mov_b32_e32 v0, s0 |
| ; SDAG-NEXT: ; return to shader part epilog |
| ; |
| ; GISEL-LABEL: s_load_b32_idxprom_wrong_stride: |
| ; GISEL: ; %bb.0: ; %entry |
| ; GISEL-NEXT: s_mov_b32 s3, 0 |
| ; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) |
| ; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 3 |
| ; GISEL-NEXT: s_add_co_u32 s0, s0, s2 |
| ; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3 |
| ; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0 |
| ; GISEL-NEXT: s_wait_kmcnt 0x0 |
| ; GISEL-NEXT: v_mov_b32_e32 v0, s0 |
| ; GISEL-NEXT: ; return to shader part epilog |
| ; The GEP strides by <2 x float> (8 bytes, shift of 3) while only a b32 is |
| ; loaded, so the stride does not match the access size and scale_offset is |
| ; not selected; the address is computed manually instead. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load float, ptr addrspace(4) %arrayidx, align 4 |
| ret float %ret |
| } |
| |
| define amdgpu_ps float @s_load_b16_idxprom_ioffset(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b16_idxprom_ioffset: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; The constant 'add 16' to the i16 index becomes the immediate offset 0x20 |
| ; (16 elements * 2 bytes) while the variable part stays in soffset with |
| ; scale_offset scaling it by the 2-byte access size. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %idxadd = add i64 %idxprom, 16 |
| %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd |
| %ld = load i16, ptr addrspace(4) %arrayidx, align 2 |
| %ret.i32 = zext i16 %ld to i32 |
| %ret = bitcast i32 %ret.i32 to float |
| ret float %ret |
| } |
| |
| define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b64_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; 8-byte stride matches the b64 access size, so the zext'd index is folded |
| ; into soffset with scale_offset. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <2 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <2 x float> %ret |
| } |
| |
| define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b96_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; [3 x float] gives a 12-byte stride matching the b96 access size; the index |
| ; folds into soffset with scale_offset. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <3 x float> %ret |
| } |
| |
| define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b128_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; 16-byte stride matches the b128 access size; scale_offset is selected. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <4 x float> %ret |
| } |
| |
| define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b256_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 |
| ; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; 32-byte stride matches the b256 access size; scale_offset is selected. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <8 x float> %ret |
| } |
| |
| define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { |
| ; GCN-LABEL: s_load_b512_idxprom: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 |
| ; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 |
| ; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 |
| ; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 |
| ; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 |
| ; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; 64-byte stride matches the b512 access size; scale_offset is selected. |
| entry: |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <16 x float> %ret |
| } |
| |
| define amdgpu_ps float @s_load_b32_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b32_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; The index itself is loaded from memory; !range (see !0) bounds it to |
| ; [0, 1024), which is enough for scale_offset selection. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load float, ptr addrspace(4) %arrayidx, align 4 |
| ret float %ret |
| } |
| |
| define amdgpu_ps float @s_load_b32_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b32_idxprom_range_ioffset: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x40 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; As above but with a constant +16 element offset, which becomes the |
| ; immediate 0x40 (16 * 4 bytes) alongside the scaled soffset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %idxadd = add i64 %idxprom, 16 |
| %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxadd |
| %ret = load float, ptr addrspace(4) %arrayidx, align 4 |
| ret float %ret |
| } |
| |
| ; Note: this is a byte load, there is nothing to scale |
| |
| define amdgpu_ps float @s_load_b8_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b8_idxprom_range_ioffset: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_u8 s0, s[0:1], s2 offset:0x10 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Byte load: the element size is 1, so there is nothing to scale — the index |
| ; goes into soffset without 'scale_offset' and +16 becomes immediate 0x10. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %idxadd = add i64 %idxprom, 16 |
| %arrayidx = getelementptr inbounds i8, ptr addrspace(4) %p, i64 %idxadd |
| %ld = load i8, ptr addrspace(4) %arrayidx |
| %ret.i32 = zext i8 %ld to i32 |
| %ret = bitcast i32 %ret.i32 to float |
| ret float %ret |
| } |
| |
| define amdgpu_ps float @s_load_b16_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b16_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index with 2-byte i16 stride: selected as s_load_u16 with |
| ; scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxprom |
| %ld = load i16, ptr addrspace(4) %arrayidx, align 2 |
| %ret.i32 = zext i16 %ld to i32 |
| %ret = bitcast i32 %ret.i32 to float |
| ret float %ret |
| } |
| |
| define amdgpu_ps float @s_load_b16_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b16_idxprom_range_ioffset: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Same as s_load_b16_idxprom_range plus a constant +16 elements, emitted as |
| ; immediate 0x20 (16 * 2 bytes) next to the scaled soffset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %idxadd = add i64 %idxprom, 16 |
| %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd |
| %ld = load i16, ptr addrspace(4) %arrayidx, align 2 |
| %ret.i32 = zext i16 %ld to i32 |
| %ret = bitcast i32 %ret.i32 to float |
| ret float %ret |
| } |
| |
| define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b64_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index, 8-byte stride matching the b64 access size: selected |
| ; with scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <2 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <2 x float> %ret |
| } |
| |
| define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b96_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index, 12-byte [3 x float] stride matching the b96 access |
| ; size: selected with scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <3 x float> %ret |
| } |
| |
| define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b128_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index, 16-byte stride matching the b128 access size: |
| ; selected with scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <4 x float> %ret |
| } |
| |
| define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b256_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 |
| ; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index, 32-byte stride matching the b256 access size: |
| ; selected with scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <8 x float> %ret |
| } |
| |
| define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align 4 inreg %p) { |
| ; GCN-LABEL: s_load_b512_idxprom_range: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset |
| ; GCN-NEXT: s_wait_kmcnt 0x0 |
| ; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 |
| ; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 |
| ; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 |
| ; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 |
| ; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 |
| ; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 |
| ; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 |
| ; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 |
| ; GCN-NEXT: ; return to shader part epilog |
| ; Range-bounded index, 64-byte stride matching the b512 access size: |
| ; selected with scale_offset. |
| entry: |
| %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 |
| %idxprom = zext i32 %idx to i64 |
| %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom |
| %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4 |
| ret <16 x float> %ret |
| } |
| |
| !0 = !{i32 0, i32 1024} |