| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 |
| ; RUN: llc -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=GFX67,GFX6 %s |
| ; RUN: llc -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefixes=GFX67,GFX7 %s |
| ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GFX89,GFX8 %s |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX89,GFX9 %s |
| |
| ; Scalar i32 loads through 32-bit (addrspace(6)) inreg pointers: each pointer is |
| ; widened to an SGPR pair with the high half zeroed (s_mov_b32 s3, 0), and the |
| ; +2 x i32 GEP (8 bytes) appears as SMEM offset 0x2 (dword units) on GFX6/7 vs |
| ; 0x8 (byte units) on GFX8/9. Attribute #0 is defined outside this chunk. |
| define amdgpu_vs float @load_i32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_i32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_load_dword s1, s[2:3], 0x2 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_add_i32 s0, s0, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_i32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_load_dword s1, s[2:3], 0x8 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_add_i32 s0, s0, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_i32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 |
| ; GFX9-NEXT: s_load_dword s5, s[2:3], 0x8 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_add_i32 s4, s4, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds i32, ptr addrspace(6) %p1, i32 2 |
| %r0 = load i32, ptr addrspace(6) %p0 |
| %r1 = load i32, ptr addrspace(6) %gep1 |
| %r = add i32 %r0, %r1 |
| %r2 = bitcast i32 %r to float |
| ret float %r2 |
| } |
| |
| ; Same 32-bit-pointer widening for <2 x i32> loads (s_load_dwordx2); the |
| ; +2 x <2 x i32> GEP (16 bytes) is SMEM offset 0x4 (dwords) on GFX6/7 vs |
| ; 0x10 (bytes) on GFX8/9. |
| define amdgpu_vs <2 x float> @load_v2i32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v2i32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x4 |
| ; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_add_i32 s0, s0, s2 |
| ; GFX67-NEXT: s_add_i32 s1, s1, s3 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v2i32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10 |
| ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_add_i32 s0, s0, s2 |
| ; GFX8-NEXT: s_add_i32 s1, s1, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v2i32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10 |
| ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_add_i32 s0, s6, s4 |
| ; GFX9-NEXT: s_add_i32 s1, s7, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x i32>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x i32>, ptr addrspace(6) %p0 |
| %r1 = load <2 x i32>, ptr addrspace(6) %gep1 |
| %r = add <2 x i32> %r0, %r1 |
| %r2 = bitcast <2 x i32> %r to <2 x float> |
| ret <2 x float> %r2 |
| } |
| |
| ; <4 x i32> variant (s_load_dwordx4); the +2 x <4 x i32> GEP (32 bytes) is SMEM |
| ; offset 0x8 (dwords) on GFX6/7 vs 0x20 (bytes) on GFX8/9. |
| define amdgpu_vs <4 x float> @load_v4i32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v4i32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x8 |
| ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_add_i32 s0, s0, s4 |
| ; GFX67-NEXT: s_add_i32 s1, s1, s5 |
| ; GFX67-NEXT: s_add_i32 s2, s2, s6 |
| ; GFX67-NEXT: s_add_i32 s3, s3, s7 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v4i32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_add_i32 s0, s0, s4 |
| ; GFX8-NEXT: s_add_i32 s1, s1, s5 |
| ; GFX8-NEXT: s_add_i32 s2, s2, s6 |
| ; GFX8-NEXT: s_add_i32 s3, s3, s7 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v4i32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_add_i32 s0, s8, s4 |
| ; GFX9-NEXT: s_add_i32 s1, s9, s5 |
| ; GFX9-NEXT: s_add_i32 s2, s10, s6 |
| ; GFX9-NEXT: s_add_i32 s3, s11, s7 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <4 x i32>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <4 x i32>, ptr addrspace(6) %p0 |
| %r1 = load <4 x i32>, ptr addrspace(6) %gep1 |
| %r = add <4 x i32> %r0, %r1 |
| %r2 = bitcast <4 x i32> %r to <4 x float> |
| ret <4 x float> %r2 |
| } |
| |
| ; <8 x i32> variant (s_load_dwordx8); the +2 x <8 x i32> GEP (64 bytes) is SMEM |
| ; offset 0x10 (dwords) on GFX6/7 vs 0x40 (bytes) on GFX8/9 (shared GFX89 checks). |
| define amdgpu_vs <8 x float> @load_v8i32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v8i32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x10 |
| ; GFX67-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_add_i32 s0, s12, s4 |
| ; GFX67-NEXT: s_add_i32 s1, s13, s5 |
| ; GFX67-NEXT: s_add_i32 s2, s14, s6 |
| ; GFX67-NEXT: s_add_i32 s3, s15, s7 |
| ; GFX67-NEXT: s_add_i32 s4, s16, s8 |
| ; GFX67-NEXT: s_add_i32 s5, s17, s9 |
| ; GFX67-NEXT: s_add_i32 s6, s18, s10 |
| ; GFX67-NEXT: s_add_i32 s7, s19, s11 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX67-NEXT: v_mov_b32_e32 v4, s4 |
| ; GFX67-NEXT: v_mov_b32_e32 v5, s5 |
| ; GFX67-NEXT: v_mov_b32_e32 v6, s6 |
| ; GFX67-NEXT: v_mov_b32_e32 v7, s7 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_v8i32: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s2, s1 |
| ; GFX89-NEXT: s_mov_b32 s3, 0 |
| ; GFX89-NEXT: s_mov_b32 s1, s3 |
| ; GFX89-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x40 |
| ; GFX89-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: s_add_i32 s0, s12, s4 |
| ; GFX89-NEXT: s_add_i32 s1, s13, s5 |
| ; GFX89-NEXT: s_add_i32 s2, s14, s6 |
| ; GFX89-NEXT: s_add_i32 s3, s15, s7 |
| ; GFX89-NEXT: s_add_i32 s4, s16, s8 |
| ; GFX89-NEXT: s_add_i32 s5, s17, s9 |
| ; GFX89-NEXT: s_add_i32 s6, s18, s10 |
| ; GFX89-NEXT: s_add_i32 s7, s19, s11 |
| ; GFX89-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX89-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX89-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX89-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX89-NEXT: v_mov_b32_e32 v4, s4 |
| ; GFX89-NEXT: v_mov_b32_e32 v5, s5 |
| ; GFX89-NEXT: v_mov_b32_e32 v6, s6 |
| ; GFX89-NEXT: v_mov_b32_e32 v7, s7 |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <8 x i32>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <8 x i32>, ptr addrspace(6) %p0 |
| %r1 = load <8 x i32>, ptr addrspace(6) %gep1 |
| %r = add <8 x i32> %r0, %r1 |
| %r2 = bitcast <8 x i32> %r to <8 x float> |
| ret <8 x float> %r2 |
| } |
| |
| ; <16 x i32> variant (s_load_dwordx16); the +2 x <16 x i32> GEP (128 bytes) is |
| ; SMEM offset 0x20 (dwords) on GFX6/7 vs 0x80 (bytes) on GFX8/9. |
| define amdgpu_vs <16 x float> @load_v16i32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v16i32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx16 s[4:19], s[2:3], 0x20 |
| ; GFX67-NEXT: s_load_dwordx16 s[36:51], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_add_i32 s0, s36, s4 |
| ; GFX67-NEXT: s_add_i32 s1, s37, s5 |
| ; GFX67-NEXT: s_add_i32 s2, s38, s6 |
| ; GFX67-NEXT: s_add_i32 s3, s39, s7 |
| ; GFX67-NEXT: s_add_i32 s4, s40, s8 |
| ; GFX67-NEXT: s_add_i32 s5, s41, s9 |
| ; GFX67-NEXT: s_add_i32 s6, s42, s10 |
| ; GFX67-NEXT: s_add_i32 s7, s43, s11 |
| ; GFX67-NEXT: s_add_i32 s8, s44, s12 |
| ; GFX67-NEXT: s_add_i32 s9, s45, s13 |
| ; GFX67-NEXT: s_add_i32 s10, s46, s14 |
| ; GFX67-NEXT: s_add_i32 s11, s47, s15 |
| ; GFX67-NEXT: s_add_i32 s12, s48, s16 |
| ; GFX67-NEXT: s_add_i32 s13, s49, s17 |
| ; GFX67-NEXT: s_add_i32 s14, s50, s18 |
| ; GFX67-NEXT: s_add_i32 s15, s51, s19 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX67-NEXT: v_mov_b32_e32 v4, s4 |
| ; GFX67-NEXT: v_mov_b32_e32 v5, s5 |
| ; GFX67-NEXT: v_mov_b32_e32 v6, s6 |
| ; GFX67-NEXT: v_mov_b32_e32 v7, s7 |
| ; GFX67-NEXT: v_mov_b32_e32 v8, s8 |
| ; GFX67-NEXT: v_mov_b32_e32 v9, s9 |
| ; GFX67-NEXT: v_mov_b32_e32 v10, s10 |
| ; GFX67-NEXT: v_mov_b32_e32 v11, s11 |
| ; GFX67-NEXT: v_mov_b32_e32 v12, s12 |
| ; GFX67-NEXT: v_mov_b32_e32 v13, s13 |
| ; GFX67-NEXT: v_mov_b32_e32 v14, s14 |
| ; GFX67-NEXT: v_mov_b32_e32 v15, s15 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_v16i32: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s2, s1 |
| ; GFX89-NEXT: s_mov_b32 s3, 0 |
| ; GFX89-NEXT: s_mov_b32 s1, s3 |
| ; GFX89-NEXT: s_load_dwordx16 s[4:19], s[2:3], 0x80 |
| ; GFX89-NEXT: s_load_dwordx16 s[36:51], s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: s_add_i32 s0, s36, s4 |
| ; GFX89-NEXT: s_add_i32 s1, s37, s5 |
| ; GFX89-NEXT: s_add_i32 s2, s38, s6 |
| ; GFX89-NEXT: s_add_i32 s3, s39, s7 |
| ; GFX89-NEXT: s_add_i32 s4, s40, s8 |
| ; GFX89-NEXT: s_add_i32 s5, s41, s9 |
| ; GFX89-NEXT: s_add_i32 s6, s42, s10 |
| ; GFX89-NEXT: s_add_i32 s7, s43, s11 |
| ; GFX89-NEXT: s_add_i32 s8, s44, s12 |
| ; GFX89-NEXT: s_add_i32 s9, s45, s13 |
| ; GFX89-NEXT: s_add_i32 s10, s46, s14 |
| ; GFX89-NEXT: s_add_i32 s11, s47, s15 |
| ; GFX89-NEXT: s_add_i32 s12, s48, s16 |
| ; GFX89-NEXT: s_add_i32 s13, s49, s17 |
| ; GFX89-NEXT: s_add_i32 s14, s50, s18 |
| ; GFX89-NEXT: s_add_i32 s15, s51, s19 |
| ; GFX89-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX89-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX89-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX89-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX89-NEXT: v_mov_b32_e32 v4, s4 |
| ; GFX89-NEXT: v_mov_b32_e32 v5, s5 |
| ; GFX89-NEXT: v_mov_b32_e32 v6, s6 |
| ; GFX89-NEXT: v_mov_b32_e32 v7, s7 |
| ; GFX89-NEXT: v_mov_b32_e32 v8, s8 |
| ; GFX89-NEXT: v_mov_b32_e32 v9, s9 |
| ; GFX89-NEXT: v_mov_b32_e32 v10, s10 |
| ; GFX89-NEXT: v_mov_b32_e32 v11, s11 |
| ; GFX89-NEXT: v_mov_b32_e32 v12, s12 |
| ; GFX89-NEXT: v_mov_b32_e32 v13, s13 |
| ; GFX89-NEXT: v_mov_b32_e32 v14, s14 |
| ; GFX89-NEXT: v_mov_b32_e32 v15, s15 |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <16 x i32>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <16 x i32>, ptr addrspace(6) %p0 |
| %r1 = load <16 x i32>, ptr addrspace(6) %gep1 |
| %r = add <16 x i32> %r0, %r1 |
| %r2 = bitcast <16 x i32> %r to <16 x float> |
| ret <16 x float> %r2 |
| } |
| |
| ; float (fadd) version of load_i32: same 32-bit addrspace(6) pointer widening, |
| ; offsets 0x2 (dwords, GFX6/7) vs 0x8 (bytes, GFX8/9); the add is v_add_f32. |
| define amdgpu_vs float @load_f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_f32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s2, s[2:3], 0x2 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_f32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dword s2, s[2:3], 0x8 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_f32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dword s4, s[2:3], 0x8 |
| ; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_add_f32_e32 v0, s5, v0 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds float, ptr addrspace(6) %p1, i32 2 |
| %r0 = load float, ptr addrspace(6) %p0 |
| %r1 = load float, ptr addrspace(6) %gep1 |
| %r = fadd float %r0, %r1 |
| ret float %r |
| } |
| |
| ; <2 x float> (fadd) variant; SMEM offsets 0x4 (dwords, GFX6/7) vs 0x10 (bytes, |
| ; GFX8/9) for the +2 x <2 x float> (16-byte) GEP. |
| define amdgpu_vs <2 x float> @load_v2f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v2f32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x4 |
| ; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v2f32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10 |
| ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX8-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX8-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v2f32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10 |
| ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_add_f32_e32 v0, s6, v0 |
| ; GFX9-NEXT: v_add_f32_e32 v1, s7, v1 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x float>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x float>, ptr addrspace(6) %p0 |
| %r1 = load <2 x float>, ptr addrspace(6) %gep1 |
| %r = fadd <2 x float> %r0, %r1 |
| ret <2 x float> %r |
| } |
| |
| ; <4 x float> (fadd) variant; SMEM offsets 0x8 (dwords, GFX6/7) vs 0x20 (bytes, |
| ; GFX8/9) for the +2 x <4 x float> (32-byte) GEP. |
| define amdgpu_vs <4 x float> @load_v4f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v4f32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x8 |
| ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v2, s2, v2 |
| ; GFX67-NEXT: v_add_f32_e32 v3, s3, v3 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v4f32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX8-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX8-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX8-NEXT: v_add_f32_e32 v2, s2, v2 |
| ; GFX8-NEXT: v_add_f32_e32 v3, s3, v3 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v4f32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX9-NEXT: v_add_f32_e32 v0, s8, v0 |
| ; GFX9-NEXT: v_add_f32_e32 v1, s9, v1 |
| ; GFX9-NEXT: v_add_f32_e32 v2, s10, v2 |
| ; GFX9-NEXT: v_add_f32_e32 v3, s11, v3 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <4 x float>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <4 x float>, ptr addrspace(6) %p0 |
| %r1 = load <4 x float>, ptr addrspace(6) %gep1 |
| %r = fadd <4 x float> %r0, %r1 |
| ret <4 x float> %r |
| } |
| |
| ; <8 x float> (fadd) variant; SMEM offsets 0x10 (dwords, GFX6/7) vs 0x40 (bytes, |
| ; GFX8/9 — shared GFX89 checks) for the +2 x <8 x float> (64-byte) GEP. |
| define amdgpu_vs <8 x float> @load_v8f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v8f32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x10 |
| ; GFX67-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX67-NEXT: v_mov_b32_e32 v4, s8 |
| ; GFX67-NEXT: v_mov_b32_e32 v5, s9 |
| ; GFX67-NEXT: v_mov_b32_e32 v6, s10 |
| ; GFX67-NEXT: v_mov_b32_e32 v7, s11 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s12, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, s13, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v2, s14, v2 |
| ; GFX67-NEXT: v_add_f32_e32 v3, s15, v3 |
| ; GFX67-NEXT: v_add_f32_e32 v4, s16, v4 |
| ; GFX67-NEXT: v_add_f32_e32 v5, s17, v5 |
| ; GFX67-NEXT: v_add_f32_e32 v6, s18, v6 |
| ; GFX67-NEXT: v_add_f32_e32 v7, s19, v7 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_v8f32: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s2, s1 |
| ; GFX89-NEXT: s_mov_b32 s3, 0 |
| ; GFX89-NEXT: s_mov_b32 s1, s3 |
| ; GFX89-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x40 |
| ; GFX89-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX89-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX89-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX89-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX89-NEXT: v_mov_b32_e32 v4, s8 |
| ; GFX89-NEXT: v_mov_b32_e32 v5, s9 |
| ; GFX89-NEXT: v_mov_b32_e32 v6, s10 |
| ; GFX89-NEXT: v_mov_b32_e32 v7, s11 |
| ; GFX89-NEXT: v_add_f32_e32 v0, s12, v0 |
| ; GFX89-NEXT: v_add_f32_e32 v1, s13, v1 |
| ; GFX89-NEXT: v_add_f32_e32 v2, s14, v2 |
| ; GFX89-NEXT: v_add_f32_e32 v3, s15, v3 |
| ; GFX89-NEXT: v_add_f32_e32 v4, s16, v4 |
| ; GFX89-NEXT: v_add_f32_e32 v5, s17, v5 |
| ; GFX89-NEXT: v_add_f32_e32 v6, s18, v6 |
| ; GFX89-NEXT: v_add_f32_e32 v7, s19, v7 |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <8 x float>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <8 x float>, ptr addrspace(6) %p0 |
| %r1 = load <8 x float>, ptr addrspace(6) %gep1 |
| %r = fadd <8 x float> %r0, %r1 |
| ret <8 x float> %r |
| } |
| |
| ; <16 x float> (fadd) variant; SMEM offsets 0x20 (dwords, GFX6/7) vs 0x80 (bytes, |
| ; GFX8/9) for the +2 x <16 x float> (128-byte) GEP. |
| define amdgpu_vs <16 x float> @load_v16f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v16f32: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx16 s[16:31], s[2:3], 0x20 |
| ; GFX67-NEXT: s_load_dwordx16 s[0:15], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s16 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s17 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s18 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s19 |
| ; GFX67-NEXT: v_mov_b32_e32 v4, s20 |
| ; GFX67-NEXT: v_mov_b32_e32 v5, s21 |
| ; GFX67-NEXT: v_mov_b32_e32 v6, s22 |
| ; GFX67-NEXT: v_mov_b32_e32 v7, s23 |
| ; GFX67-NEXT: v_mov_b32_e32 v8, s24 |
| ; GFX67-NEXT: v_mov_b32_e32 v9, s25 |
| ; GFX67-NEXT: v_mov_b32_e32 v10, s26 |
| ; GFX67-NEXT: v_mov_b32_e32 v11, s27 |
| ; GFX67-NEXT: v_mov_b32_e32 v12, s28 |
| ; GFX67-NEXT: v_mov_b32_e32 v13, s29 |
| ; GFX67-NEXT: v_mov_b32_e32 v14, s30 |
| ; GFX67-NEXT: v_mov_b32_e32 v15, s31 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v2, s2, v2 |
| ; GFX67-NEXT: v_add_f32_e32 v3, s3, v3 |
| ; GFX67-NEXT: v_add_f32_e32 v4, s4, v4 |
| ; GFX67-NEXT: v_add_f32_e32 v5, s5, v5 |
| ; GFX67-NEXT: v_add_f32_e32 v6, s6, v6 |
| ; GFX67-NEXT: v_add_f32_e32 v7, s7, v7 |
| ; GFX67-NEXT: v_add_f32_e32 v8, s8, v8 |
| ; GFX67-NEXT: v_add_f32_e32 v9, s9, v9 |
| ; GFX67-NEXT: v_add_f32_e32 v10, s10, v10 |
| ; GFX67-NEXT: v_add_f32_e32 v11, s11, v11 |
| ; GFX67-NEXT: v_add_f32_e32 v12, s12, v12 |
| ; GFX67-NEXT: v_add_f32_e32 v13, s13, v13 |
| ; GFX67-NEXT: v_add_f32_e32 v14, s14, v14 |
| ; GFX67-NEXT: v_add_f32_e32 v15, s15, v15 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v16f32: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx16 s[16:31], s[2:3], 0x80 |
| ; GFX8-NEXT: s_load_dwordx16 s[0:15], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s16 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s17 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s18 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s19 |
| ; GFX8-NEXT: v_mov_b32_e32 v4, s20 |
| ; GFX8-NEXT: v_mov_b32_e32 v5, s21 |
| ; GFX8-NEXT: v_mov_b32_e32 v6, s22 |
| ; GFX8-NEXT: v_mov_b32_e32 v7, s23 |
| ; GFX8-NEXT: v_mov_b32_e32 v8, s24 |
| ; GFX8-NEXT: v_mov_b32_e32 v9, s25 |
| ; GFX8-NEXT: v_mov_b32_e32 v10, s26 |
| ; GFX8-NEXT: v_mov_b32_e32 v11, s27 |
| ; GFX8-NEXT: v_mov_b32_e32 v12, s28 |
| ; GFX8-NEXT: v_mov_b32_e32 v13, s29 |
| ; GFX8-NEXT: v_mov_b32_e32 v14, s30 |
| ; GFX8-NEXT: v_mov_b32_e32 v15, s31 |
| ; GFX8-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX8-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX8-NEXT: v_add_f32_e32 v2, s2, v2 |
| ; GFX8-NEXT: v_add_f32_e32 v3, s3, v3 |
| ; GFX8-NEXT: v_add_f32_e32 v4, s4, v4 |
| ; GFX8-NEXT: v_add_f32_e32 v5, s5, v5 |
| ; GFX8-NEXT: v_add_f32_e32 v6, s6, v6 |
| ; GFX8-NEXT: v_add_f32_e32 v7, s7, v7 |
| ; GFX8-NEXT: v_add_f32_e32 v8, s8, v8 |
| ; GFX8-NEXT: v_add_f32_e32 v9, s9, v9 |
| ; GFX8-NEXT: v_add_f32_e32 v10, s10, v10 |
| ; GFX8-NEXT: v_add_f32_e32 v11, s11, v11 |
| ; GFX8-NEXT: v_add_f32_e32 v12, s12, v12 |
| ; GFX8-NEXT: v_add_f32_e32 v13, s13, v13 |
| ; GFX8-NEXT: v_add_f32_e32 v14, s14, v14 |
| ; GFX8-NEXT: v_add_f32_e32 v15, s15, v15 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v16f32: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx16 s[36:51], s[2:3], 0x80 |
| ; GFX9-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s36 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s37 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s38 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s39 |
| ; GFX9-NEXT: v_mov_b32_e32 v4, s40 |
| ; GFX9-NEXT: v_mov_b32_e32 v5, s41 |
| ; GFX9-NEXT: v_mov_b32_e32 v6, s42 |
| ; GFX9-NEXT: v_mov_b32_e32 v7, s43 |
| ; GFX9-NEXT: v_mov_b32_e32 v8, s44 |
| ; GFX9-NEXT: v_mov_b32_e32 v9, s45 |
| ; GFX9-NEXT: v_mov_b32_e32 v10, s46 |
| ; GFX9-NEXT: v_mov_b32_e32 v11, s47 |
| ; GFX9-NEXT: v_mov_b32_e32 v12, s48 |
| ; GFX9-NEXT: v_mov_b32_e32 v13, s49 |
| ; GFX9-NEXT: v_mov_b32_e32 v14, s50 |
| ; GFX9-NEXT: v_mov_b32_e32 v15, s51 |
| ; GFX9-NEXT: v_add_f32_e32 v0, s4, v0 |
| ; GFX9-NEXT: v_add_f32_e32 v1, s5, v1 |
| ; GFX9-NEXT: v_add_f32_e32 v2, s6, v2 |
| ; GFX9-NEXT: v_add_f32_e32 v3, s7, v3 |
| ; GFX9-NEXT: v_add_f32_e32 v4, s8, v4 |
| ; GFX9-NEXT: v_add_f32_e32 v5, s9, v5 |
| ; GFX9-NEXT: v_add_f32_e32 v6, s10, v6 |
| ; GFX9-NEXT: v_add_f32_e32 v7, s11, v7 |
| ; GFX9-NEXT: v_add_f32_e32 v8, s12, v8 |
| ; GFX9-NEXT: v_add_f32_e32 v9, s13, v9 |
| ; GFX9-NEXT: v_add_f32_e32 v10, s14, v10 |
| ; GFX9-NEXT: v_add_f32_e32 v11, s15, v11 |
| ; GFX9-NEXT: v_add_f32_e32 v12, s16, v12 |
| ; GFX9-NEXT: v_add_f32_e32 v13, s17, v13 |
| ; GFX9-NEXT: v_add_f32_e32 v14, s18, v14 |
| ; GFX9-NEXT: v_add_f32_e32 v15, s19, v15 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <16 x float>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <16 x float>, ptr addrspace(6) %p0 |
| %r1 = load <16 x float>, ptr addrspace(6) %gep1 |
| %r = fadd <16 x float> %r0, %r1 |
| ret <16 x float> %r |
| } |
| |
| ; Checks the high half chosen when widening the 32-bit pointer: here it is 0 |
| ; (s_mov_b32 s1, 0). Presumably controlled by attribute #1 (defined outside |
| ; this chunk, likely "amdgpu-32bit-address-high-bits") — TODO confirm. |
| define amdgpu_vs i32 @load_i32_hi0(ptr addrspace(6) inreg %p) #1 { |
| ; GFX67-LABEL: load_i32_hi0: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s1, 0 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_hi0: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s1, 0 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %r0 = load i32, ptr addrspace(6) %p |
| ret i32 %r0 |
| } |
| |
| ; As load_i32_hi0, but the widened pointer's high half is 1 (s_mov_b32 s1, 1); |
| ; attribute #2 is defined outside this chunk. |
| define amdgpu_vs i32 @load_i32_hi1(ptr addrspace(6) inreg %p) #2 { |
| ; GFX67-LABEL: load_i32_hi1: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s1, 1 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_hi1: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s1, 1 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %r0 = load i32, ptr addrspace(6) %p |
| ret i32 %r0 |
| } |
| |
| ; High half 0xffff8000: s_movk_i32 sign-extends the 16-bit literal 0x8000, so a |
| ; single short-encoding mov materializes it; attribute #3 is outside this chunk. |
| define amdgpu_vs i32 @load_i32_hiffff8000(ptr addrspace(6) inreg %p) #3 { |
| ; GFX67-LABEL: load_i32_hiffff8000: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_movk_i32 s1, 0x8000 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_hiffff8000: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_movk_i32 s1, 0x8000 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %r0 = load i32, ptr addrspace(6) %p |
| ret i32 %r0 |
| } |
| |
| ; High half 0xfffffff0, emitted as the inline constant -16 (s_mov_b32 s1, -16); |
| ; attribute #4 is outside this chunk. |
| define amdgpu_vs i32 @load_i32_hifffffff0(ptr addrspace(6) inreg %p) #4 { |
| ; GFX67-LABEL: load_i32_hifffffff0: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s1, -16 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_hifffffff0: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s1, -16 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %r0 = load i32, ptr addrspace(6) %p |
| ret i32 %r0 |
| } |
| |
| define amdgpu_ps <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @load_sampler(ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #5 { |
| ; GFX6-LABEL: load_sampler: |
| ; GFX6: ; %bb.0: ; %main_body |
| ; GFX6-NEXT: s_mov_b64 s[6:7], exec |
| ; GFX6-NEXT: s_wqm_b64 exec, exec |
| ; GFX6-NEXT: s_mov_b32 m0, s5 |
| ; GFX6-NEXT: v_interp_mov_f32 v0, p0, attr0.x |
| ; GFX6-NEXT: v_lshlrev_b32_e32 v0, 6, v0 |
| ; GFX6-NEXT: v_add_i32_e32 v0, vcc, s1, v0 |
| ; GFX6-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GFX6-NEXT: s_mov_b32 s1, 0 |
| ; GFX6-NEXT: s_nop 2 |
| ; GFX6-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0 |
| ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xc |
| ; GFX6-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX6-NEXT: s_and_b64 exec, exec, s[6:7] |
| ; GFX6-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX6-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf |
| ; GFX6-NEXT: s_waitcnt vmcnt(0) |
| ; GFX6-NEXT: ; return to shader part epilog |
| ; |
| ; GFX7-LABEL: load_sampler: |
| ; GFX7: ; %bb.0: ; %main_body |
| ; GFX7-NEXT: s_mov_b64 s[6:7], exec |
| ; GFX7-NEXT: s_wqm_b64 exec, exec |
| ; GFX7-NEXT: s_mov_b32 m0, s5 |
| ; GFX7-NEXT: v_interp_mov_f32 v0, p0, attr0.x |
| ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 6, v0 |
| ; GFX7-NEXT: v_add_i32_e32 v0, vcc, s1, v0 |
| ; GFX7-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GFX7-NEXT: s_mov_b32 s1, 0 |
| ; GFX7-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0 |
| ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xc |
| ; GFX7-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX7-NEXT: s_and_b64 exec, exec, s[6:7] |
| ; GFX7-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX7-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf |
| ; GFX7-NEXT: s_waitcnt vmcnt(0) |
| ; GFX7-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_sampler: |
| ; GFX8: ; %bb.0: ; %main_body |
| ; GFX8-NEXT: s_mov_b64 s[6:7], exec |
| ; GFX8-NEXT: s_wqm_b64 exec, exec |
| ; GFX8-NEXT: s_mov_b32 m0, s5 |
| ; GFX8-NEXT: v_interp_mov_f32_e32 v0, p0, attr0.x |
| ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 6, v0 |
| ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s1, v0 |
| ; GFX8-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GFX8-NEXT: s_mov_b32 s1, 0 |
| ; GFX8-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0 |
| ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x30 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX8-NEXT: s_and_b64 exec, exec, s[6:7] |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf |
| ; GFX8-NEXT: s_waitcnt vmcnt(0) |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_sampler: |
| ; GFX9: ; %bb.0: ; %main_body |
| ; GFX9-NEXT: s_mov_b64 s[6:7], exec |
| ; GFX9-NEXT: s_wqm_b64 exec, exec |
| ; GFX9-NEXT: s_mov_b32 m0, s5 |
| ; GFX9-NEXT: s_mov_b32 s17, 0 |
| ; GFX9-NEXT: v_interp_mov_f32_e32 v0, p0, attr0.x |
| ; GFX9-NEXT: v_lshl_add_u32 v0, v0, 6, s1 |
| ; GFX9-NEXT: v_readfirstlane_b32 s16, v0 |
| ; GFX9-NEXT: s_load_dwordx8 s[8:15], s[16:17], 0x0 |
| ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[16:17], 0x30 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX9-NEXT: s_and_b64 exec, exec, s[6:7] |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) |
| ; GFX9-NEXT: ; return to shader part epilog |
| main_body: |
| %22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8 |
| %23 = bitcast float %22 to i32 |
| %24 = shl i32 %23, 1 |
| %25 = getelementptr inbounds [0 x <8 x i32>], ptr addrspace(6) %1, i32 0, i32 %24, !amdgpu.uniform !0 |
| %26 = load <8 x i32>, ptr addrspace(6) %25, align 32, !invariant.load !0 |
| %27 = shl i32 %23, 2 |
| %28 = getelementptr [0 x <4 x i32>], ptr addrspace(6) %1, i32 0, i32 %27, !amdgpu.uniform !0 |
| %29 = getelementptr inbounds [0 x <4 x i32>], ptr addrspace(6) %28, i32 0, i32 3, !amdgpu.uniform !0 |
| %30 = load <4 x i32>, ptr addrspace(6) %29, align 16, !invariant.load !0 |
| %31 = call nsz <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float 0.0, <8 x i32> %26, <4 x i32> %30, i1 0, i32 0, i32 0) #8 |
| %32 = extractelement <4 x float> %31, i32 0 |
| %33 = extractelement <4 x float> %31, i32 1 |
| %34 = extractelement <4 x float> %31, i32 2 |
| %35 = extractelement <4 x float> %31, i32 3 |
| %36 = bitcast float %4 to i32 |
| %37 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> poison, i32 %36, 4 |
| %38 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %37, float %32, 5 |
| %39 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %38, float %33, 6 |
| %40 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %39, float %34, 7 |
| %41 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %40, float %35, 8 |
| %42 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %41, float %20, 19 |
| ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %42 |
| } |
| |
; As load_sampler above, but the resource/sampler GEPs carry no
; !amdgpu.uniform metadata. The generated code is the same on every target:
; the descriptor address is still moved to an SGPR with v_readfirstlane_b32
; and the descriptors are fetched with scalar loads (s_load_dwordx8 for the
; <8 x i32> resource, s_load_dwordx4 for the <4 x i32> sampler).
; NOTE(review): checks are autogenerated by update_llc_test_checks.py — do
; not hand-edit the GFX* lines.
define amdgpu_ps <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @load_sampler_nouniform(ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(6) inreg noalias dereferenceable(18446744073709551615), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #5 {
; GFX6-LABEL: load_sampler_nouniform:
; GFX6: ; %bb.0: ; %main_body
; GFX6-NEXT: s_mov_b64 s[6:7], exec
; GFX6-NEXT: s_wqm_b64 exec, exec
; GFX6-NEXT: s_mov_b32 m0, s5
; GFX6-NEXT: v_interp_mov_f32 v0, p0, attr0.x
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 6, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, s1, v0
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
; GFX6-NEXT: s_mov_b32 s1, 0
; GFX6-NEXT: s_nop 2
; GFX6-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xc
; GFX6-NEXT: v_mov_b32_e32 v0, 0
; GFX6-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: load_sampler_nouniform:
; GFX7: ; %bb.0: ; %main_body
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: s_wqm_b64 exec, exec
; GFX7-NEXT: s_mov_b32 m0, s5
; GFX7-NEXT: v_interp_mov_f32 v0, p0, attr0.x
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 6, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, s1, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
; GFX7-NEXT: s_mov_b32 s1, 0
; GFX7-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xc
; GFX7-NEXT: v_mov_b32_e32 v0, 0
; GFX7-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_sampler_nouniform:
; GFX8: ; %bb.0: ; %main_body
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: s_wqm_b64 exec, exec
; GFX8-NEXT: s_mov_b32 m0, s5
; GFX8-NEXT: v_interp_mov_f32_e32 v0, p0, attr0.x
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 6, v0
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s1, v0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: s_mov_b32 s1, 0
; GFX8-NEXT: s_load_dwordx8 s[8:15], s[0:1], 0x0
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x30
; GFX8-NEXT: v_mov_b32_e32 v0, 0
; GFX8-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_sampler_nouniform:
; GFX9: ; %bb.0: ; %main_body
; GFX9-NEXT: s_mov_b64 s[6:7], exec
; GFX9-NEXT: s_wqm_b64 exec, exec
; GFX9-NEXT: s_mov_b32 m0, s5
; GFX9-NEXT: s_mov_b32 s17, 0
; GFX9-NEXT: v_interp_mov_f32_e32 v0, p0, attr0.x
; GFX9-NEXT: v_lshl_add_u32 v0, v0, 6, s1
; GFX9-NEXT: v_readfirstlane_b32 s16, v0
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[16:17], 0x0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[16:17], 0x30
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: image_sample v[0:3], v0, s[8:15], s[0:3] dmask:0xf
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ; return to shader part epilog
main_body:
  %22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8
  %23 = bitcast float %22 to i32
  %24 = shl i32 %23, 1
  ; These GEPs intentionally lack the !amdgpu.uniform annotation used by
  ; load_sampler.
  %25 = getelementptr inbounds [0 x <8 x i32>], ptr addrspace(6) %1, i32 0, i32 %24
  %26 = load <8 x i32>, ptr addrspace(6) %25, align 32, !invariant.load !0
  %27 = shl i32 %23, 2
  %28 = getelementptr [0 x <4 x i32>], ptr addrspace(6) %1, i32 0, i32 %27
  %29 = getelementptr inbounds [0 x <4 x i32>], ptr addrspace(6) %28, i32 0, i32 3
  %30 = load <4 x i32>, ptr addrspace(6) %29, align 16, !invariant.load !0
  %31 = call nsz <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float 0.0, <8 x i32> %26, <4 x i32> %30, i1 0, i32 0, i32 0) #8
  %32 = extractelement <4 x float> %31, i32 0
  %33 = extractelement <4 x float> %31, i32 1
  %34 = extractelement <4 x float> %31, i32 2
  %35 = extractelement <4 x float> %31, i32 3
  %36 = bitcast float %4 to i32
  %37 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> poison, i32 %36, 4
  %38 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %37, float %32, 5
  %39 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %38, float %33, 6
  %40 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %39, float %34, 7
  %41 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %40, float %35, 8
  %42 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %41, float %20, 19
  ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %42
}
| |
; Checks that the constant +4 byte offset from the gep is applied to the base
; SGPR with an explicit s_add_i32 (the s_load_dword immediate stays 0x0 on
; all targets) rather than being folded into the load's offset field.
define amdgpu_vs float @load_addr_no_fold(ptr addrspace(6) inreg noalias %p0) #0 {
; GFX67-LABEL: load_addr_no_fold:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_add_i32 s0, s0, 4
; GFX67-NEXT: s_mov_b32 s1, 0
; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: ; return to shader part epilog
;
; GFX89-LABEL: load_addr_no_fold:
; GFX89: ; %bb.0:
; GFX89-NEXT: s_add_i32 s0, s0, 4
; GFX89-NEXT: s_mov_b32 s1, 0
; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0
; GFX89-NEXT: s_waitcnt lgkmcnt(0)
; GFX89-NEXT: v_mov_b32_e32 v0, s0
; GFX89-NEXT: ; return to shader part epilog
  %gep1 = getelementptr i32, ptr addrspace(6) %p0, i32 1
  %r1 = load i32, ptr addrspace(6) %gep1
  %r2 = bitcast i32 %r1 to float
  ret float %r2
}
| |
; The addrspace(6) pointer argument is not inreg, so it arrives in a VGPR
; (v0). It is transferred to an SGPR with v_readfirstlane_b32 so the buffer
; descriptor can be fetched with a scalar s_load_dwordx4 and fed to
; buffer_load_format_x.
define amdgpu_vs float @vgpr_arg_src(ptr addrspace(6) %arg) {
; GFX6-LABEL: vgpr_arg_src:
; GFX6: ; %bb.0: ; %main_body
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
; GFX6-NEXT: s_mov_b32 s1, 0
; GFX6-NEXT: s_nop 2
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: vgpr_arg_src:
; GFX7: ; %bb.0: ; %main_body
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
; GFX7-NEXT: s_mov_b32 s1, 0
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: ; return to shader part epilog
;
; GFX89-LABEL: vgpr_arg_src:
; GFX89: ; %bb.0: ; %main_body
; GFX89-NEXT: v_readfirstlane_b32 s0, v0
; GFX89-NEXT: s_mov_b32 s1, 0
; GFX89-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX89-NEXT: s_waitcnt lgkmcnt(0)
; GFX89-NEXT: s_nop 1
; GFX89-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX89-NEXT: s_waitcnt vmcnt(0)
; GFX89-NEXT: ; return to shader part epilog
main_body:
  %tmp9 = load ptr addrspace(8), ptr addrspace(6) %arg
  %tmp10 = call nsz float @llvm.amdgcn.struct.ptr.buffer.load.format.f32(ptr addrspace(8) %tmp9, i32 poison, i32 0, i32 0, i32 0) #1
  ret float %tmp10
}
| |
| ; define amdgpu_vs float @load_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i8, ptr addrspace(6) %p0 |
| ; %r1 = load i8, ptr addrspace(6) %gep1 |
| ; %r = add i8 %r0, %r1 |
| ; %ext = zext i8 %r to i32 |
| ; %r2 = bitcast i32 %ext to float |
| ; ret float %r2 |
| ; } |
| |
| ; define amdgpu_vs float @zextload_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i8, ptr addrspace(6) %p0 |
| ; %r1 = load i8, ptr addrspace(6) %gep1 |
| ; %zext.r0 = zext i8 %r0 to i32 |
| ; %zext.r1 = zext i8 %r1 to i32 |
| ; %r = add i32 %zext.r0, %zext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; define amdgpu_vs float @sextload_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i8, ptr addrspace(6) %p0 |
| ; %r1 = load i8, ptr addrspace(6) %gep1 |
| ; %zext.r0 = sext i8 %r0 to i32 |
| ; %zext.r1 = sext i8 %r1 to i32 |
| ; %r = add i32 %zext.r0, %zext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; define amdgpu_vs half @load_i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i16, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i16, ptr addrspace(6) %p0 |
| ; %r1 = load i16, ptr addrspace(6) %gep1 |
| ; %r = add i16 %r0, %r1 |
| ; %r2 = bitcast i16 %r to half |
| ; ret half %r2 |
| ; } |
| |
| ; define amdgpu_vs half @load_i16_align4(ptr addrspace(6) inreg %ptr) #0 { |
| ; %gep1 = getelementptr inbounds i16, ptr addrspace(6) %ptr, i32 2 |
| ; %ld = load i16, ptr addrspace(6) %gep1 |
| ; %cast = bitcast i16 %ld to half |
| ; ret half %cast |
| ; } |
| |
; <2 x i16> load + add. GFX6-8 expand the packed i16 add into scalar
; lshr/add/and/or bit manipulation; GFX9 uses v_pk_add_u16. The same 8-byte
; gep offset appears as 0x2 on GFX6/7 but 0x8 on GFX8/9 (dword- vs
; byte-based s_load immediates).
define amdgpu_vs <2 x half> @load_v2i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
; GFX67-LABEL: load_v2i16:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_mov_b32 s2, s1
; GFX67-NEXT: s_mov_b32 s3, 0
; GFX67-NEXT: s_mov_b32 s1, s3
; GFX67-NEXT: s_load_dword s2, s[2:3], 0x2
; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: s_lshr_b32 s1, s2, 16
; GFX67-NEXT: s_lshr_b32 s3, s0, 16
; GFX67-NEXT: s_add_i32 s0, s0, s2
; GFX67-NEXT: s_add_i32 s3, s3, s1
; GFX67-NEXT: s_and_b32 s0, s0, 0xffff
; GFX67-NEXT: s_lshl_b32 s1, s3, 16
; GFX67-NEXT: s_or_b32 s0, s0, s1
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_v2i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s2, s1
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: s_mov_b32 s1, s3
; GFX8-NEXT: s_load_dword s2, s[2:3], 0x8
; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_lshr_b32 s1, s2, 16
; GFX8-NEXT: s_lshr_b32 s3, s0, 16
; GFX8-NEXT: s_add_i32 s3, s3, s1
; GFX8-NEXT: s_add_i32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s1, s3, 16
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_v2i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, s1
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_mov_b32 s1, s3
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x8
; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_pk_add_u16 v0, s5, v0
; GFX9-NEXT: ; return to shader part epilog
  %gep1 = getelementptr inbounds <2 x i16>, ptr addrspace(6) %p1, i32 2
  %r0 = load <2 x i16>, ptr addrspace(6) %p0
  %r1 = load <2 x i16>, ptr addrspace(6) %gep1
  %r = add <2 x i16> %r0, %r1
  %r2 = bitcast <2 x i16> %r to <2 x half>
  ret <2 x half> %r2
}
| |
; <3 x i16> load + add: loaded as dwordx2 on all targets. GFX6-8 expand the
; packed add with scalar lshr/add/and/or; GFX9 uses two v_pk_add_u16. The
; gep offset shows as 0x4 on GFX6/7 vs 0x10 on GFX8/9 (dword- vs byte-based
; s_load immediates).
define amdgpu_vs <3 x half> @load_v3i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
; GFX67-LABEL: load_v3i16:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_mov_b32 s3, 0
; GFX67-NEXT: s_mov_b32 s2, s1
; GFX67-NEXT: s_mov_b32 s1, s3
; GFX67-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x4
; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: s_lshr_b32 s4, s2, 16
; GFX67-NEXT: s_lshr_b32 s5, s0, 16
; GFX67-NEXT: s_add_i32 s5, s5, s4
; GFX67-NEXT: s_add_i32 s0, s0, s2
; GFX67-NEXT: s_add_i32 s1, s1, s3
; GFX67-NEXT: s_lshl_b32 s3, s5, 16
; GFX67-NEXT: s_and_b32 s0, s0, 0xffff
; GFX67-NEXT: s_and_b32 s1, s1, 0xffff
; GFX67-NEXT: s_or_b32 s0, s0, s3
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: v_mov_b32_e32 v1, s1
; GFX67-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_v3i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: s_mov_b32 s2, s1
; GFX8-NEXT: s_mov_b32 s1, s3
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_lshr_b32 s4, s2, 16
; GFX8-NEXT: s_add_i32 s1, s1, s3
; GFX8-NEXT: s_lshr_b32 s3, s0, 16
; GFX8-NEXT: s_add_i32 s3, s3, s4
; GFX8-NEXT: s_add_i32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s2, s3, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_v3i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, s1
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_mov_b32 s1, s3
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_pk_add_u16 v0, s6, v0
; GFX9-NEXT: v_pk_add_u16 v1, s7, v1
; GFX9-NEXT: ; return to shader part epilog
  %gep1 = getelementptr inbounds <3 x i16>, ptr addrspace(6) %p1, i32 2
  %r0 = load <3 x i16>, ptr addrspace(6) %p0
  %r1 = load <3 x i16>, ptr addrspace(6) %gep1
  %r = add <3 x i16> %r0, %r1
  %r2 = bitcast <3 x i16> %r to <3 x half>
  ret <3 x half> %r2
}
| |
; <4 x i16> load + add: loaded as dwordx2. GFX6-8 expand the packed add
; with scalar lshr/add/and/or per 32-bit lane; GFX9 uses two v_pk_add_u16.
; Gep offset: 0x4 on GFX6/7 vs 0x10 on GFX8/9.
define amdgpu_vs <4 x half> @load_v4i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
; GFX67-LABEL: load_v4i16:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_mov_b32 s3, 0
; GFX67-NEXT: s_mov_b32 s2, s1
; GFX67-NEXT: s_mov_b32 s1, s3
; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX67-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x4
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: s_lshr_b32 s4, s0, 16
; GFX67-NEXT: s_lshr_b32 s5, s1, 16
; GFX67-NEXT: s_lshr_b32 s6, s2, 16
; GFX67-NEXT: s_lshr_b32 s7, s3, 16
; GFX67-NEXT: s_add_i32 s5, s5, s7
; GFX67-NEXT: s_add_i32 s1, s1, s3
; GFX67-NEXT: s_add_i32 s4, s4, s6
; GFX67-NEXT: s_add_i32 s0, s0, s2
; GFX67-NEXT: s_lshl_b32 s5, s5, 16
; GFX67-NEXT: s_lshl_b32 s3, s4, 16
; GFX67-NEXT: s_and_b32 s0, s0, 0xffff
; GFX67-NEXT: s_and_b32 s1, s1, 0xffff
; GFX67-NEXT: s_or_b32 s0, s0, s3
; GFX67-NEXT: s_or_b32 s1, s1, s5
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: v_mov_b32_e32 v1, s1
; GFX67-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_v4i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: s_mov_b32 s2, s1
; GFX8-NEXT: s_mov_b32 s1, s3
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_lshr_b32 s4, s3, 16
; GFX8-NEXT: s_lshr_b32 s5, s1, 16
; GFX8-NEXT: s_add_i32 s5, s5, s4
; GFX8-NEXT: s_add_i32 s1, s1, s3
; GFX8-NEXT: s_lshr_b32 s3, s2, 16
; GFX8-NEXT: s_lshr_b32 s4, s0, 16
; GFX8-NEXT: s_add_i32 s4, s4, s3
; GFX8-NEXT: s_add_i32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s2, s4, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, s1, 0xffff
; GFX8-NEXT: s_lshl_b32 s2, s5, 16
; GFX8-NEXT: s_or_b32 s1, s1, s2
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_v4i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, s1
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_mov_b32 s1, s3
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_pk_add_u16 v0, s6, v0
; GFX9-NEXT: v_pk_add_u16 v1, s7, v1
; GFX9-NEXT: ; return to shader part epilog
  %gep1 = getelementptr inbounds <4 x i16>, ptr addrspace(6) %p1, i32 2
  %r0 = load <4 x i16>, ptr addrspace(6) %p0
  %r1 = load <4 x i16>, ptr addrspace(6) %gep1
  %r = add <4 x i16> %r0, %r1
  %r2 = bitcast <4 x i16> %r to <4 x half>
  ret <4 x half> %r2
}
| |
; <6 x i16> load + add: loaded as dwordx4 (over-read of the 12-byte vector).
; GFX6-8 expand the packed add with scalar lshr/add/and/or; GFX9 uses three
; v_pk_add_u16. Gep offset: 0x8 on GFX6/7 vs 0x20 on GFX8/9.
define amdgpu_vs <6 x half> @load_v6i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
; GFX67-LABEL: load_v6i16:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_mov_b32 s5, 0
; GFX67-NEXT: s_mov_b32 s4, s1
; GFX67-NEXT: s_mov_b32 s1, s5
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX67-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x8
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: s_lshr_b32 s3, s0, 16
; GFX67-NEXT: s_lshr_b32 s9, s4, 16
; GFX67-NEXT: s_lshr_b32 s7, s1, 16
; GFX67-NEXT: s_lshr_b32 s8, s2, 16
; GFX67-NEXT: s_lshr_b32 s10, s5, 16
; GFX67-NEXT: s_lshr_b32 s11, s6, 16
; GFX67-NEXT: s_add_i32 s3, s3, s9
; GFX67-NEXT: s_add_i32 s0, s0, s4
; GFX67-NEXT: s_add_i32 s8, s8, s11
; GFX67-NEXT: s_add_i32 s2, s2, s6
; GFX67-NEXT: s_add_i32 s7, s7, s10
; GFX67-NEXT: s_add_i32 s1, s1, s5
; GFX67-NEXT: s_and_b32 s0, s0, 0xffff
; GFX67-NEXT: s_lshl_b32 s3, s3, 16
; GFX67-NEXT: s_lshl_b32 s6, s7, 16
; GFX67-NEXT: s_or_b32 s0, s0, s3
; GFX67-NEXT: s_and_b32 s1, s1, 0xffff
; GFX67-NEXT: s_and_b32 s2, s2, 0xffff
; GFX67-NEXT: s_lshl_b32 s3, s8, 16
; GFX67-NEXT: s_or_b32 s1, s1, s6
; GFX67-NEXT: s_or_b32 s2, s2, s3
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: v_mov_b32_e32 v1, s1
; GFX67-NEXT: v_mov_b32_e32 v2, s2
; GFX67-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_v6i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: s_mov_b32 s2, s1
; GFX8-NEXT: s_mov_b32 s1, s3
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_lshr_b32 s3, s6, 16
; GFX8-NEXT: s_lshr_b32 s7, s2, 16
; GFX8-NEXT: s_add_i32 s7, s7, s3
; GFX8-NEXT: s_add_i32 s2, s2, s6
; GFX8-NEXT: s_lshr_b32 s3, s5, 16
; GFX8-NEXT: s_lshr_b32 s6, s1, 16
; GFX8-NEXT: s_add_i32 s6, s6, s3
; GFX8-NEXT: s_add_i32 s1, s1, s5
; GFX8-NEXT: s_lshr_b32 s3, s4, 16
; GFX8-NEXT: s_lshr_b32 s5, s0, 16
; GFX8-NEXT: s_add_i32 s5, s5, s3
; GFX8-NEXT: s_add_i32 s0, s0, s4
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s3, s5, 16
; GFX8-NEXT: s_or_b32 s0, s0, s3
; GFX8-NEXT: s_and_b32 s1, s1, 0xffff
; GFX8-NEXT: s_lshl_b32 s3, s6, 16
; GFX8-NEXT: s_or_b32 s1, s1, s3
; GFX8-NEXT: s_and_b32 s2, s2, 0xffff
; GFX8-NEXT: s_lshl_b32 s3, s7, 16
; GFX8-NEXT: s_or_b32 s2, s2, s3
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_v6i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, s1
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_mov_b32 s1, s3
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_pk_add_u16 v0, s8, v0
; GFX9-NEXT: v_pk_add_u16 v1, s9, v1
; GFX9-NEXT: v_pk_add_u16 v2, s10, v2
; GFX9-NEXT: ; return to shader part epilog
  %gep1 = getelementptr inbounds <6 x i16>, ptr addrspace(6) %p1, i32 2
  %r0 = load <6 x i16>, ptr addrspace(6) %p0
  %r1 = load <6 x i16>, ptr addrspace(6) %gep1
  %r = add <6 x i16> %r0, %r1
  %r2 = bitcast <6 x i16> %r to <6 x half>
  ret <6 x half> %r2
}
| |
; <8 x i16> load + add: loaded as dwordx4. GFX6-8 expand the packed add with
; scalar lshr/add/and/or per 32-bit lane; GFX9 uses four v_pk_add_u16.
; Gep offset: 0x8 on GFX6/7 vs 0x20 on GFX8/9.
define amdgpu_vs <8 x half> @load_v8i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
; GFX67-LABEL: load_v8i16:
; GFX67: ; %bb.0:
; GFX67-NEXT: s_mov_b32 s5, 0
; GFX67-NEXT: s_mov_b32 s4, s1
; GFX67-NEXT: s_mov_b32 s1, s5
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX67-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x8
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
; GFX67-NEXT: s_lshr_b32 s8, s0, 16
; GFX67-NEXT: s_lshr_b32 s12, s4, 16
; GFX67-NEXT: s_lshr_b32 s9, s1, 16
; GFX67-NEXT: s_lshr_b32 s10, s2, 16
; GFX67-NEXT: s_lshr_b32 s11, s3, 16
; GFX67-NEXT: s_lshr_b32 s13, s5, 16
; GFX67-NEXT: s_lshr_b32 s14, s6, 16
; GFX67-NEXT: s_lshr_b32 s15, s7, 16
; GFX67-NEXT: s_add_i32 s8, s8, s12
; GFX67-NEXT: s_add_i32 s0, s0, s4
; GFX67-NEXT: s_add_i32 s11, s11, s15
; GFX67-NEXT: s_add_i32 s3, s3, s7
; GFX67-NEXT: s_add_i32 s10, s10, s14
; GFX67-NEXT: s_add_i32 s2, s2, s6
; GFX67-NEXT: s_add_i32 s9, s9, s13
; GFX67-NEXT: s_add_i32 s1, s1, s5
; GFX67-NEXT: s_and_b32 s0, s0, 0xffff
; GFX67-NEXT: s_lshl_b32 s4, s8, 16
; GFX67-NEXT: s_lshl_b32 s11, s11, 16
; GFX67-NEXT: s_lshl_b32 s6, s9, 16
; GFX67-NEXT: s_or_b32 s0, s0, s4
; GFX67-NEXT: s_and_b32 s1, s1, 0xffff
; GFX67-NEXT: s_and_b32 s2, s2, 0xffff
; GFX67-NEXT: s_lshl_b32 s4, s10, 16
; GFX67-NEXT: s_and_b32 s3, s3, 0xffff
; GFX67-NEXT: s_or_b32 s1, s1, s6
; GFX67-NEXT: s_or_b32 s2, s2, s4
; GFX67-NEXT: s_or_b32 s3, s3, s11
; GFX67-NEXT: v_mov_b32_e32 v0, s0
; GFX67-NEXT: v_mov_b32_e32 v1, s1
; GFX67-NEXT: v_mov_b32_e32 v2, s2
; GFX67-NEXT: v_mov_b32_e32 v3, s3
; GFX67-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: load_v8i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: s_mov_b32 s2, s1
; GFX8-NEXT: s_mov_b32 s1, s3
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_lshr_b32 s8, s7, 16
; GFX8-NEXT: s_lshr_b32 s9, s3, 16
; GFX8-NEXT: s_add_i32 s9, s9, s8
; GFX8-NEXT: s_add_i32 s3, s3, s7
; GFX8-NEXT: s_lshr_b32 s7, s6, 16
; GFX8-NEXT: s_lshr_b32 s8, s2, 16
; GFX8-NEXT: s_add_i32 s8, s8, s7
; GFX8-NEXT: s_add_i32 s2, s2, s6
; GFX8-NEXT: s_lshr_b32 s6, s5, 16
; GFX8-NEXT: s_lshr_b32 s7, s1, 16
; GFX8-NEXT: s_add_i32 s7, s7, s6
; GFX8-NEXT: s_add_i32 s1, s1, s5
; GFX8-NEXT: s_lshr_b32 s5, s4, 16
; GFX8-NEXT: s_lshr_b32 s6, s0, 16
; GFX8-NEXT: s_add_i32 s6, s6, s5
; GFX8-NEXT: s_add_i32 s0, s0, s4
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s4, s6, 16
; GFX8-NEXT: s_or_b32 s0, s0, s4
; GFX8-NEXT: s_and_b32 s1, s1, 0xffff
; GFX8-NEXT: s_lshl_b32 s4, s7, 16
; GFX8-NEXT: s_or_b32 s1, s1, s4
; GFX8-NEXT: s_and_b32 s2, s2, 0xffff
; GFX8-NEXT: s_lshl_b32 s4, s8, 16
; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: s_and_b32 s3, s3, 0xffff
; GFX8-NEXT: s_lshl_b32 s4, s9, 16
; GFX8-NEXT: s_or_b32 s3, s3, s4
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: load_v8i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, s1
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_mov_b32 s1, s3
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_pk_add_u16 v0, s8, v0
; GFX9-NEXT: v_pk_add_u16 v1, s9, v1
; GFX9-NEXT: v_pk_add_u16 v2, s10, v2
; GFX9-NEXT: v_pk_add_u16 v3, s11, v3
; GFX9-NEXT: ; return to shader part epilog
  %gep1 = getelementptr inbounds <8 x i16>, ptr addrspace(6) %p1, i32 2
  %r0 = load <8 x i16>, ptr addrspace(6) %p0
  %r1 = load <8 x i16>, ptr addrspace(6) %gep1
  %r = add <8 x i16> %r0, %r1
  %r2 = bitcast <8 x i16> %r to <8 x half>
  ret <8 x half> %r2
}
| |
| define amdgpu_vs <16 x half> @load_v16i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v16i16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s9, 0 |
| ; GFX67-NEXT: s_mov_b32 s8, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s9 |
| ; GFX67-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x0 |
| ; GFX67-NEXT: s_load_dwordx8 s[8:15], s[8:9], 0x10 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s16, s0, 16 |
| ; GFX67-NEXT: s_lshr_b32 s24, s8, 16 |
| ; GFX67-NEXT: s_lshr_b32 s18, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s26, s10, 16 |
| ; GFX67-NEXT: s_add_i32 s16, s16, s24 |
| ; GFX67-NEXT: s_add_i32 s0, s0, s8 |
| ; GFX67-NEXT: s_lshr_b32 s20, s4, 16 |
| ; GFX67-NEXT: s_lshr_b32 s28, s12, 16 |
| ; GFX67-NEXT: s_add_i32 s18, s18, s26 |
| ; GFX67-NEXT: s_add_i32 s2, s2, s10 |
| ; GFX67-NEXT: s_and_b32 s0, s0, 0xffff |
| ; GFX67-NEXT: s_lshl_b32 s8, s16, 16 |
| ; GFX67-NEXT: s_lshr_b32 s17, s1, 16 |
| ; GFX67-NEXT: s_lshr_b32 s19, s3, 16 |
| ; GFX67-NEXT: s_lshr_b32 s21, s5, 16 |
| ; GFX67-NEXT: s_lshr_b32 s22, s6, 16 |
| ; GFX67-NEXT: s_lshr_b32 s23, s7, 16 |
| ; GFX67-NEXT: s_lshr_b32 s25, s9, 16 |
| ; GFX67-NEXT: s_lshr_b32 s27, s11, 16 |
| ; GFX67-NEXT: s_lshr_b32 s29, s13, 16 |
| ; GFX67-NEXT: s_lshr_b32 s30, s14, 16 |
| ; GFX67-NEXT: s_lshr_b32 s31, s15, 16 |
| ; GFX67-NEXT: s_add_i32 s20, s20, s28 |
| ; GFX67-NEXT: s_add_i32 s4, s4, s12 |
| ; GFX67-NEXT: s_or_b32 s0, s0, s8 |
| ; GFX67-NEXT: s_and_b32 s2, s2, 0xffff |
| ; GFX67-NEXT: s_lshl_b32 s8, s18, 16 |
| ; GFX67-NEXT: s_add_i32 s23, s23, s31 |
| ; GFX67-NEXT: s_add_i32 s7, s7, s15 |
| ; GFX67-NEXT: s_add_i32 s22, s22, s30 |
| ; GFX67-NEXT: s_add_i32 s6, s6, s14 |
| ; GFX67-NEXT: s_add_i32 s21, s21, s29 |
| ; GFX67-NEXT: s_add_i32 s5, s5, s13 |
| ; GFX67-NEXT: s_add_i32 s19, s19, s27 |
| ; GFX67-NEXT: s_add_i32 s3, s3, s11 |
| ; GFX67-NEXT: s_add_i32 s17, s17, s25 |
| ; GFX67-NEXT: s_add_i32 s1, s1, s9 |
| ; GFX67-NEXT: s_or_b32 s2, s2, s8 |
| ; GFX67-NEXT: s_and_b32 s4, s4, 0xffff |
| ; GFX67-NEXT: s_lshl_b32 s8, s20, 16 |
| ; GFX67-NEXT: s_lshl_b32 s23, s23, 16 |
| ; GFX67-NEXT: s_lshl_b32 s14, s21, 16 |
| ; GFX67-NEXT: s_lshl_b32 s12, s19, 16 |
| ; GFX67-NEXT: s_lshl_b32 s10, s17, 16 |
| ; GFX67-NEXT: s_and_b32 s1, s1, 0xffff |
| ; GFX67-NEXT: s_and_b32 s3, s3, 0xffff |
| ; GFX67-NEXT: s_or_b32 s4, s4, s8 |
| ; GFX67-NEXT: s_and_b32 s5, s5, 0xffff |
| ; GFX67-NEXT: s_and_b32 s6, s6, 0xffff |
| ; GFX67-NEXT: s_lshl_b32 s8, s22, 16 |
| ; GFX67-NEXT: s_and_b32 s7, s7, 0xffff |
| ; GFX67-NEXT: s_or_b32 s1, s1, s10 |
| ; GFX67-NEXT: s_or_b32 s3, s3, s12 |
| ; GFX67-NEXT: s_or_b32 s5, s5, s14 |
| ; GFX67-NEXT: s_or_b32 s6, s6, s8 |
| ; GFX67-NEXT: s_or_b32 s7, s7, s23 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v2, s2 |
| ; GFX67-NEXT: v_mov_b32_e32 v3, s3 |
| ; GFX67-NEXT: v_mov_b32_e32 v4, s4 |
| ; GFX67-NEXT: v_mov_b32_e32 v5, s5 |
| ; GFX67-NEXT: v_mov_b32_e32 v6, s6 |
| ; GFX67-NEXT: v_mov_b32_e32 v7, s7 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v16i16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x40 |
| ; GFX8-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_lshr_b32 s0, s11, 16 |
| ; GFX8-NEXT: s_lshr_b32 s1, s19, 16 |
| ; GFX8-NEXT: s_lshr_b32 s2, s10, 16 |
| ; GFX8-NEXT: s_lshr_b32 s3, s18, 16 |
| ; GFX8-NEXT: s_add_i32 s1, s1, s0 |
| ; GFX8-NEXT: s_add_i32 s0, s19, s11 |
| ; GFX8-NEXT: s_add_i32 s3, s3, s2 |
| ; GFX8-NEXT: s_add_i32 s2, s18, s10 |
| ; GFX8-NEXT: s_lshr_b32 s10, s9, 16 |
| ; GFX8-NEXT: s_lshr_b32 s11, s17, 16 |
| ; GFX8-NEXT: s_add_i32 s11, s11, s10 |
| ; GFX8-NEXT: s_add_i32 s9, s17, s9 |
| ; GFX8-NEXT: s_lshr_b32 s10, s8, 16 |
| ; GFX8-NEXT: s_lshr_b32 s17, s16, 16 |
| ; GFX8-NEXT: s_add_i32 s17, s17, s10 |
| ; GFX8-NEXT: s_add_i32 s8, s16, s8 |
| ; GFX8-NEXT: s_lshr_b32 s10, s7, 16 |
| ; GFX8-NEXT: s_lshr_b32 s16, s15, 16 |
| ; GFX8-NEXT: s_add_i32 s16, s16, s10 |
| ; GFX8-NEXT: s_add_i32 s7, s15, s7 |
| ; GFX8-NEXT: s_lshr_b32 s10, s6, 16 |
| ; GFX8-NEXT: s_lshr_b32 s15, s14, 16 |
| ; GFX8-NEXT: s_add_i32 s15, s15, s10 |
| ; GFX8-NEXT: s_add_i32 s6, s14, s6 |
| ; GFX8-NEXT: s_lshr_b32 s10, s5, 16 |
| ; GFX8-NEXT: s_lshr_b32 s14, s13, 16 |
| ; GFX8-NEXT: s_add_i32 s14, s14, s10 |
| ; GFX8-NEXT: s_add_i32 s5, s13, s5 |
| ; GFX8-NEXT: s_lshr_b32 s10, s4, 16 |
| ; GFX8-NEXT: s_lshr_b32 s13, s12, 16 |
| ; GFX8-NEXT: s_add_i32 s13, s13, s10 |
| ; GFX8-NEXT: s_add_i32 s4, s12, s4 |
| ; GFX8-NEXT: s_and_b32 s4, s4, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s13, 16 |
| ; GFX8-NEXT: s_or_b32 s4, s4, s10 |
| ; GFX8-NEXT: s_and_b32 s5, s5, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s14, 16 |
| ; GFX8-NEXT: s_or_b32 s5, s5, s10 |
| ; GFX8-NEXT: s_and_b32 s6, s6, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s15, 16 |
| ; GFX8-NEXT: s_or_b32 s6, s6, s10 |
| ; GFX8-NEXT: s_and_b32 s7, s7, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s16, 16 |
| ; GFX8-NEXT: s_or_b32 s7, s7, s10 |
| ; GFX8-NEXT: s_and_b32 s8, s8, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s17, 16 |
| ; GFX8-NEXT: s_or_b32 s8, s8, s10 |
| ; GFX8-NEXT: s_and_b32 s9, s9, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s10, s11, 16 |
| ; GFX8-NEXT: s_and_b32 s2, s2, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s3, s3, 16 |
| ; GFX8-NEXT: s_and_b32 s0, s0, 0xffff |
| ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 |
| ; GFX8-NEXT: s_or_b32 s9, s9, s10 |
| ; GFX8-NEXT: s_or_b32 s2, s2, s3 |
| ; GFX8-NEXT: s_or_b32 s0, s0, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX8-NEXT: v_mov_b32_e32 v4, s8 |
| ; GFX8-NEXT: v_mov_b32_e32 v5, s9 |
| ; GFX8-NEXT: v_mov_b32_e32 v6, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v7, s0 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v16i16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x40 |
| ; GFX9-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX9-NEXT: v_mov_b32_e32 v4, s8 |
| ; GFX9-NEXT: v_mov_b32_e32 v5, s9 |
| ; GFX9-NEXT: v_mov_b32_e32 v6, s10 |
| ; GFX9-NEXT: v_mov_b32_e32 v7, s11 |
| ; GFX9-NEXT: v_pk_add_u16 v0, s12, v0 |
| ; GFX9-NEXT: v_pk_add_u16 v1, s13, v1 |
| ; GFX9-NEXT: v_pk_add_u16 v2, s14, v2 |
| ; GFX9-NEXT: v_pk_add_u16 v3, s15, v3 |
| ; GFX9-NEXT: v_pk_add_u16 v4, s16, v4 |
| ; GFX9-NEXT: v_pk_add_u16 v5, s17, v5 |
| ; GFX9-NEXT: v_pk_add_u16 v6, s18, v6 |
| ; GFX9-NEXT: v_pk_add_u16 v7, s19, v7 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <16 x i16>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <16 x i16>, ptr addrspace(6) %p0 |
| %r1 = load <16 x i16>, ptr addrspace(6) %gep1 |
| %r = add <16 x i16> %r0, %r1 |
| %r2 = bitcast <16 x i16> %r to <16 x half> |
| ret <16 x half> %r2 |
| } |
| |
| ; define amdgpu_vs float @zextload_i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i16, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i16, ptr addrspace(6) %p0 |
| ; %r1 = load i16, ptr addrspace(6) %gep1 |
| ; %zext.r0 = zext i16 %r0 to i32 |
| ; %zext.r1 = zext i16 %r1 to i32 |
| ; %r = add i32 %zext.r0, %zext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; Load two <2 x i16> values via 32-bit constant pointers (addrspace 6), |
| ; zero-extend to <2 x i32>, add, and return the sum bitcast to <2 x float>. |
| ; The inreg i32 pointer is widened into an even/odd sgpr pair with a zeroed |
| ; high word so it can feed s_load_dword; note the gep offset is dword-scaled |
| ; on GFX6/7 (0x2) but byte-scaled on GFX8/9 (0x8). The zext is selected as |
| ; s_lshr_b32 for the high lane and s_and_b32 0xffff for the low lane. |
| ; NOTE: the CHECK lines are autogenerated (see the UTC_ARGS header); |
| ; regenerate with update_llc_test_checks.py rather than editing by hand. |
| define amdgpu_vs <2 x float> @zextload_v2i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: zextload_v2i16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s2, s[2:3], 0x2 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s1, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s3, s0, 16 |
| ; GFX67-NEXT: s_and_b32 s0, s0, 0xffff |
| ; GFX67-NEXT: s_and_b32 s2, s2, 0xffff |
| ; GFX67-NEXT: s_add_i32 s0, s0, s2 |
| ; GFX67-NEXT: s_add_i32 s3, s3, s1 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: zextload_v2i16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_load_dword s1, s[2:3], 0x8 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_lshr_b32 s2, s0, 16 |
| ; GFX8-NEXT: s_and_b32 s0, s0, 0xffff |
| ; GFX8-NEXT: s_lshr_b32 s3, s1, 16 |
| ; GFX8-NEXT: s_and_b32 s1, s1, 0xffff |
| ; GFX8-NEXT: s_add_i32 s0, s0, s1 |
| ; GFX8-NEXT: s_add_i32 s2, s2, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: zextload_v2i16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 |
| ; GFX9-NEXT: s_load_dword s5, s[2:3], 0x8 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_lshr_b32 s0, s4, 16 |
| ; GFX9-NEXT: s_and_b32 s1, s4, 0xffff |
| ; GFX9-NEXT: s_lshr_b32 s2, s5, 16 |
| ; GFX9-NEXT: s_and_b32 s3, s5, 0xffff |
| ; GFX9-NEXT: s_add_i32 s1, s1, s3 |
| ; GFX9-NEXT: s_add_i32 s0, s0, s2 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s1 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s0 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x i16>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x i16>, ptr addrspace(6) %p0 |
| %r1 = load <2 x i16>, ptr addrspace(6) %gep1 |
| %zext.r0 = zext <2 x i16> %r0 to <2 x i32> |
| %zext.r1 = zext <2 x i16> %r1 to <2 x i32> |
| %r = add <2 x i32> %zext.r0, %zext.r1 |
| %r2 = bitcast <2 x i32> %r to <2 x float> |
| ret <2 x float> %r2 |
| } |
| |
| ; define amdgpu_vs float @sextload_i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i16, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i16, ptr addrspace(6) %p0 |
| ; %r1 = load i16, ptr addrspace(6) %gep1 |
| ; %sext.r0 = sext i16 %r0 to i32 |
| ; %sext.r1 = sext i16 %r1 to i32 |
| ; %r = add i32 %sext.r0, %sext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; Like zextload_v2i16 but sign-extending: <2 x i16> loads are widened with |
| ; s_ashr_i32 for the high lane and s_sext_i32_i16 for the low lane before |
| ; the two scalar s_add_i32 ops. Offsets are again dword-scaled on GFX6/7 |
| ; (0x2) and byte-scaled on GFX8/9 (0x8). |
| define amdgpu_vs <2 x float> @sextload_v2i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: sextload_v2i16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_load_dword s1, s[2:3], 0x2 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_ashr_i32 s2, s0, 16 |
| ; GFX67-NEXT: s_sext_i32_i16 s0, s0 |
| ; GFX67-NEXT: s_ashr_i32 s3, s1, 16 |
| ; GFX67-NEXT: s_sext_i32_i16 s1, s1 |
| ; GFX67-NEXT: s_add_i32 s0, s0, s1 |
| ; GFX67-NEXT: s_add_i32 s2, s2, s3 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: sextload_v2i16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_load_dword s1, s[2:3], 0x8 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_ashr_i32 s2, s0, 16 |
| ; GFX8-NEXT: s_sext_i32_i16 s0, s0 |
| ; GFX8-NEXT: s_ashr_i32 s3, s1, 16 |
| ; GFX8-NEXT: s_sext_i32_i16 s1, s1 |
| ; GFX8-NEXT: s_add_i32 s0, s0, s1 |
| ; GFX8-NEXT: s_add_i32 s2, s2, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s0 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: sextload_v2i16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 |
| ; GFX9-NEXT: s_load_dword s5, s[2:3], 0x8 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_ashr_i32 s0, s4, 16 |
| ; GFX9-NEXT: s_sext_i32_i16 s1, s4 |
| ; GFX9-NEXT: s_ashr_i32 s2, s5, 16 |
| ; GFX9-NEXT: s_sext_i32_i16 s3, s5 |
| ; GFX9-NEXT: s_add_i32 s1, s1, s3 |
| ; GFX9-NEXT: s_add_i32 s0, s0, s2 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s1 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s0 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x i16>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x i16>, ptr addrspace(6) %p0 |
| %r1 = load <2 x i16>, ptr addrspace(6) %gep1 |
| %sext.r0 = sext <2 x i16> %r0 to <2 x i32> |
| %sext.r1 = sext <2 x i16> %r1 to <2 x i32> |
| %r = add <2 x i32> %sext.r0, %sext.r1 |
| %r2 = bitcast <2 x i32> %r to <2 x float> |
| ret <2 x float> %r2 |
| } |
| |
| |
| ; define amdgpu_vs float @load_i1(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i1, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i1, ptr addrspace(6) %p0 |
| ; %r1 = load i1, ptr addrspace(6) %gep1 |
| ; %r = and i1 %r0, %r1 |
| ; %r2 = zext i1 %r to i32 |
| ; %cast = bitcast i32 %r2 to float |
| ; ret float %cast |
| ; } |
| |
| ; define amdgpu_vs float @zextload_i1(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i1, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i1, ptr addrspace(6) %p0 |
| ; %r1 = load i1, ptr addrspace(6) %gep1 |
| ; %zext.r0 = zext i1 %r0 to i32 |
| ; %zext.r1 = zext i1 %r1 to i32 |
| ; %r = and i32 %zext.r0, %zext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; define amdgpu_vs float @sextload_i1(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds i1, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load i1, ptr addrspace(6) %p0 |
| ; %r1 = load i1, ptr addrspace(6) %gep1 |
| ; %sext.r0 = sext i1 %r0 to i32 |
| ; %sext.r1 = sext i1 %r1 to i32 |
| ; %r = and i32 %sext.r0, %sext.r1 |
| ; %r2 = bitcast i32 %r to float |
| ; ret float %r2 |
| ; } |
| |
| ; define amdgpu_vs half @load_f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; %gep1 = getelementptr inbounds half, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load half, ptr addrspace(6) %p0 |
| ; %r1 = load half, ptr addrspace(6) %gep1 |
| ; %r = fadd half %r0, %r1 |
| ; ret half %r |
| ; } |
| |
| ; fadd of two <2 x half> values loaded through 32-bit constant pointers. |
| ; GFX6/7 have no f16 arithmetic: each lane is converted to f32 with |
| ; v_cvt_f32_f16, added, converted back, and repacked with shift+or. |
| ; GFX8 adds the low lane with v_add_f16 and the high lane with an SDWA add |
| ; writing WORD_1. GFX9 uses a single packed v_pk_add_f16. |
| define amdgpu_vs <2 x half> @load_v2f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v2f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s2, s[2:3], 0x2 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s1, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s3, s0, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s2 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v0 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v3, v2 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v0 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v1, v0 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v2f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dword s2, s[2:3], 0x8 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_lshr_b32 s1, s2, 16 |
| ; GFX8-NEXT: s_lshr_b32 s3, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX8-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s0, v1 |
| ; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v2f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dword s4, s[2:3], 0x8 |
| ; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s5, v0 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x half>, ptr addrspace(6) %p0 |
| %r1 = load <2 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <2 x half> %r0, %r1 |
| ret <2 x half> %r |
| } |
| |
| ; fadd of two <3 x half> values; the loads are widened to s_load_dwordx2. |
| ; GFX9 handles it as two packed v_pk_add_f16 ops (the unused high lane of |
| ; the second dword is computed anyway); GFX8 mixes scalar v_add_f16 with an |
| ; SDWA add for the packed low dword; GFX6/7 expand each lane through f32. |
| define amdgpu_vs <3 x half> @load_v3f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v3f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x4 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s2, s4, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s2 |
| ; GFX67-NEXT: s_lshr_b32 s2, s0, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s2 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s4 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s5 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v5, s1 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v2, v1 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v2, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v4, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v3, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v5, v0 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v0 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v3, v0 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v3f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10 |
| ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s1, v0 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_add_f16_e32 v0, s0, v0 |
| ; GFX8-NEXT: s_lshr_b32 s1, s2, 16 |
| ; GFX8-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s0 |
| ; GFX8-NEXT: v_add_f16_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v3f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10 |
| ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s6, v0 |
| ; GFX9-NEXT: v_pk_add_f16 v1, s7, v1 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <3 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <3 x half>, ptr addrspace(6) %p0 |
| %r1 = load <3 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <3 x half> %r0, %r1 |
| ret <3 x half> %r |
| } |
| |
| ; fadd of two <4 x half> values loaded as s_load_dwordx2 pairs. GFX6/7 |
| ; convert all four lanes to f32 and back, repacking with shift+or; GFX8 |
| ; pairs v_add_f16 with SDWA adds per dword; GFX9 emits two packed |
| ; v_pk_add_f16 instructions. |
| define amdgpu_vs <4 x half> @load_v4f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v4f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x4 |
| ; GFX67-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s4, s3, 16 |
| ; GFX67-NEXT: s_lshr_b32 s5, s1, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s4 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s5 |
| ; GFX67-NEXT: s_lshr_b32 s4, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s5, s0, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s1 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s4 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s5 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v5, s2 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v6, s0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v2, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v2, v4, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v2, v2 |
| ; GFX67-NEXT: v_add_f32_e32 v3, v6, v5 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v3, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v3, v0 |
| ; GFX67-NEXT: v_or_b32_e32 v1, v1, v4 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v4f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x10 |
| ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s1, v0 |
| ; GFX8-NEXT: s_lshr_b32 s3, s3, 16 |
| ; GFX8-NEXT: s_lshr_b32 s1, s1, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v2, s1 |
| ; GFX8-NEXT: v_add_f16_sdwa v2, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_add_f16_e32 v0, s0, v0 |
| ; GFX8-NEXT: s_lshr_b32 s1, s2, 16 |
| ; GFX8-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v3, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v4, s0 |
| ; GFX8-NEXT: v_add_f16_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 |
| ; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v4f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x10 |
| ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s6, v0 |
| ; GFX9-NEXT: v_pk_add_f16 v1, s7, v1 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <4 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <4 x half>, ptr addrspace(6) %p0 |
| %r1 = load <4 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <4 x half> %r0, %r1 |
| ret <4 x half> %r |
| } |
| |
| ; fadd of two <6 x half> values; the loads are widened to s_load_dwordx4 |
| ; (the fourth dword of each result is unused). GFX9 needs three packed |
| ; v_pk_add_f16 ops; GFX8 uses three v_add_f16 + SDWA pairs; GFX6/7 expand |
| ; every lane through f32 conversions. |
| define amdgpu_vs <6 x half> @load_v6f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v6f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x8 |
| ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s6 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s2 |
| ; GFX67-NEXT: s_lshr_b32 s2, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s3, s6, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s3 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s2 |
| ; GFX67-NEXT: s_lshr_b32 s2, s5, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s2 |
| ; GFX67-NEXT: s_lshr_b32 s2, s1, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s2 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v5, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v2 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v2, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v6, s0 |
| ; GFX67-NEXT: s_lshr_b32 s1, s4, 16 |
| ; GFX67-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s5 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s4 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v7, s1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v8, s0 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v3, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v3, v6, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v4, v8, v7 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v4, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v3, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v6, 16, v0 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v4 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v3, v0 |
| ; GFX67-NEXT: v_or_b32_e32 v1, v1, v6 |
| ; GFX67-NEXT: v_or_b32_e32 v2, v5, v2 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v6f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s6 |
| ; GFX8-NEXT: v_add_f16_e32 v2, s2, v0 |
| ; GFX8-NEXT: s_lshr_b32 s3, s6, 16 |
| ; GFX8-NEXT: s_lshr_b32 s2, s2, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: v_add_f16_sdwa v3, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s5 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s1, v0 |
| ; GFX8-NEXT: s_lshr_b32 s2, s5, 16 |
| ; GFX8-NEXT: s_lshr_b32 s1, s1, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v4, s1 |
| ; GFX8-NEXT: v_add_f16_sdwa v4, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX8-NEXT: v_add_f16_e32 v0, s0, v0 |
| ; GFX8-NEXT: s_lshr_b32 s1, s4, 16 |
| ; GFX8-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v5, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v6, s0 |
| ; GFX8-NEXT: v_add_f16_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 |
| ; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 |
| ; GFX8-NEXT: v_or_b32_e32 v2, v2, v3 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v6f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s8, v0 |
| ; GFX9-NEXT: v_pk_add_f16 v1, s9, v1 |
| ; GFX9-NEXT: v_pk_add_f16 v2, s10, v2 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <6 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <6 x half>, ptr addrspace(6) %p0 |
| %r1 = load <6 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <6 x half> %r0, %r1 |
| ret <6 x half> %r |
| } |
| |
| ; fadd of two <8 x half> values loaded with s_load_dwordx4. Four packed |
| ; v_pk_add_f16 ops on GFX9; four v_add_f16 + SDWA WORD_1 pairs repacked |
| ; with v_or_b32 on GFX8; full per-lane f32 expansion on GFX6/7. |
| define amdgpu_vs <8 x half> @load_v8f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v8f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s5, 0 |
| ; GFX67-NEXT: s_mov_b32 s4, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s5 |
| ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX67-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x8 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s11, s3, 16 |
| ; GFX67-NEXT: s_lshr_b32 s12, s7, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s12 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s11 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s6 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s2 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v0 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s7 |
| ; GFX67-NEXT: s_lshr_b32 s10, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s13, s6, 16 |
| ; GFX67-NEXT: s_lshr_b32 s9, s1, 16 |
| ; GFX67-NEXT: s_lshr_b32 s12, s5, 16 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v5, 16, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s13 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s10 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v6, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s12 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s9 |
| ; GFX67-NEXT: s_lshr_b32 s8, s0, 16 |
| ; GFX67-NEXT: s_lshr_b32 s11, s4, 16 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v7, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v2, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s5 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s4 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v8, s0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v9, s11 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v10, s8 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v3, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v3, v8, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v4, v10, v9 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v4, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v3, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v8, 16, v0 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v4 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v3, v0 |
| ; GFX67-NEXT: v_or_b32_e32 v1, v1, v8 |
| ; GFX67-NEXT: v_or_b32_e32 v2, v7, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v3, v6, v5 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v8f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s7 |
| ; GFX8-NEXT: v_add_f16_e32 v3, s3, v0 |
| ; GFX8-NEXT: s_lshr_b32 s7, s7, 16 |
| ; GFX8-NEXT: s_lshr_b32 s3, s3, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s7 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX8-NEXT: v_add_f16_sdwa v4, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s6 |
| ; GFX8-NEXT: v_add_f16_e32 v2, s2, v0 |
| ; GFX8-NEXT: s_lshr_b32 s3, s6, 16 |
| ; GFX8-NEXT: s_lshr_b32 s2, s2, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: v_add_f16_sdwa v5, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s5 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s1, v0 |
| ; GFX8-NEXT: s_lshr_b32 s2, s5, 16 |
| ; GFX8-NEXT: s_lshr_b32 s1, s1, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v6, s1 |
| ; GFX8-NEXT: v_add_f16_sdwa v6, v6, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX8-NEXT: v_add_f16_e32 v0, s0, v0 |
| ; GFX8-NEXT: s_lshr_b32 s1, s4, 16 |
| ; GFX8-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v7, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v8, s0 |
| ; GFX8-NEXT: v_add_f16_sdwa v7, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 |
| ; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 |
| ; GFX8-NEXT: v_or_b32_e32 v2, v2, v5 |
| ; GFX8-NEXT: v_or_b32_e32 v3, v3, v4 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v8f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x20 |
| ; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s8, v0 |
| ; GFX9-NEXT: v_pk_add_f16 v1, s9, v1 |
| ; GFX9-NEXT: v_pk_add_f16 v2, s10, v2 |
| ; GFX9-NEXT: v_pk_add_f16 v3, s11, v3 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <8 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <8 x half>, ptr addrspace(6) %p0 |
| %r1 = load <8 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <8 x half> %r0, %r1 |
| ret <8 x half> %r |
| } |
| |
| define amdgpu_vs <16 x half> @load_v16f16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v16f16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s9, 0 |
| ; GFX67-NEXT: s_mov_b32 s8, s1 |
| ; GFX67-NEXT: s_mov_b32 s1, s9 |
| ; GFX67-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x0 |
| ; GFX67-NEXT: s_load_dwordx8 s[8:15], s[8:9], 0x10 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshr_b32 s23, s7, 16 |
| ; GFX67-NEXT: s_lshr_b32 s28, s15, 16 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v0, s28 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s23 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s7 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s14 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s6 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v1, v0 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s15 |
| ; GFX67-NEXT: s_lshr_b32 s22, s6, 16 |
| ; GFX67-NEXT: s_lshr_b32 s29, s14, 16 |
| ; GFX67-NEXT: s_lshr_b32 s21, s5, 16 |
| ; GFX67-NEXT: s_lshr_b32 s28, s13, 16 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v7, 16, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s29 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s22 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v8, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s28 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s21 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v6, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v9, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s13 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s5 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s12 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s4 |
| ; GFX67-NEXT: s_lshr_b32 s20, s4, 16 |
| ; GFX67-NEXT: s_lshr_b32 s23, s12, 16 |
| ; GFX67-NEXT: s_lshr_b32 s19, s3, 16 |
| ; GFX67-NEXT: s_lshr_b32 s27, s11, 16 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v5, 16, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s23 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s20 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v10, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s27 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s19 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v11, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v12, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s11 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s10 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s2 |
| ; GFX67-NEXT: s_lshr_b32 s18, s2, 16 |
| ; GFX67-NEXT: s_lshr_b32 s26, s10, 16 |
| ; GFX67-NEXT: s_lshr_b32 s17, s1, 16 |
| ; GFX67-NEXT: s_lshr_b32 s25, s9, 16 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v13, 16, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s26 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v2, s18 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v14, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s25 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s17 |
| ; GFX67-NEXT: s_lshr_b32 s16, s0, 16 |
| ; GFX67-NEXT: s_lshr_b32 s24, s8, 16 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v15, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v2, v1 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v2, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v0, v4, v3 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v1, s9 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v3, s1 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v4, s8 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v16, s0 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v17, s24 |
| ; GFX67-NEXT: v_cvt_f32_f16_e32 v18, s16 |
| ; GFX67-NEXT: v_add_f32_e32 v1, v3, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v3, v16, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v0, v0 |
| ; GFX67-NEXT: v_add_f32_e32 v4, v18, v17 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v4, v4 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v3, v3 |
| ; GFX67-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v16, 16, v0 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v0, 16, v4 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v4, 16, v12 |
| ; GFX67-NEXT: v_lshlrev_b32_e32 v9, 16, v9 |
| ; GFX67-NEXT: v_or_b32_e32 v0, v3, v0 |
| ; GFX67-NEXT: v_or_b32_e32 v1, v1, v16 |
| ; GFX67-NEXT: v_or_b32_e32 v2, v15, v2 |
| ; GFX67-NEXT: v_or_b32_e32 v3, v14, v13 |
| ; GFX67-NEXT: v_or_b32_e32 v4, v11, v4 |
| ; GFX67-NEXT: v_or_b32_e32 v5, v10, v5 |
| ; GFX67-NEXT: v_or_b32_e32 v6, v6, v9 |
| ; GFX67-NEXT: v_or_b32_e32 v7, v8, v7 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v16f16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s3, 0 |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, s3 |
| ; GFX8-NEXT: s_load_dwordx8 s[8:15], s[2:3], 0x40 |
| ; GFX8-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x0 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s15 |
| ; GFX8-NEXT: v_add_f16_e32 v7, s7, v0 |
| ; GFX8-NEXT: s_lshr_b32 s15, s15, 16 |
| ; GFX8-NEXT: s_lshr_b32 s7, s7, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s15 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s7 |
| ; GFX8-NEXT: v_add_f16_sdwa v8, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s14 |
| ; GFX8-NEXT: v_add_f16_e32 v6, s6, v0 |
| ; GFX8-NEXT: s_lshr_b32 s7, s14, 16 |
| ; GFX8-NEXT: s_lshr_b32 s6, s6, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s7 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s6 |
| ; GFX8-NEXT: v_add_f16_sdwa v9, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s13 |
| ; GFX8-NEXT: v_add_f16_e32 v5, s5, v0 |
| ; GFX8-NEXT: s_lshr_b32 s6, s13, 16 |
| ; GFX8-NEXT: s_lshr_b32 s5, s5, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s6 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX8-NEXT: v_add_f16_sdwa v10, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s12 |
| ; GFX8-NEXT: v_add_f16_e32 v4, s4, v0 |
| ; GFX8-NEXT: s_lshr_b32 s5, s12, 16 |
| ; GFX8-NEXT: s_lshr_b32 s4, s4, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s5 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s4 |
| ; GFX8-NEXT: v_add_f16_sdwa v11, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s11 |
| ; GFX8-NEXT: v_add_f16_e32 v3, s3, v0 |
| ; GFX8-NEXT: s_lshr_b32 s4, s11, 16 |
| ; GFX8-NEXT: s_lshr_b32 s3, s3, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s3 |
| ; GFX8-NEXT: v_add_f16_sdwa v12, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s10 |
| ; GFX8-NEXT: v_add_f16_e32 v2, s2, v0 |
| ; GFX8-NEXT: s_lshr_b32 s3, s10, 16 |
| ; GFX8-NEXT: s_lshr_b32 s2, s2, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s2 |
| ; GFX8-NEXT: v_add_f16_sdwa v13, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s9 |
| ; GFX8-NEXT: v_add_f16_e32 v1, s1, v0 |
| ; GFX8-NEXT: s_lshr_b32 s2, s9, 16 |
| ; GFX8-NEXT: s_lshr_b32 s1, s1, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s2 |
| ; GFX8-NEXT: v_mov_b32_e32 v14, s1 |
| ; GFX8-NEXT: v_add_f16_sdwa v14, v14, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s8 |
| ; GFX8-NEXT: v_add_f16_e32 v0, s0, v0 |
| ; GFX8-NEXT: s_lshr_b32 s1, s8, 16 |
| ; GFX8-NEXT: s_lshr_b32 s0, s0, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v15, s1 |
| ; GFX8-NEXT: v_mov_b32_e32 v16, s0 |
| ; GFX8-NEXT: v_add_f16_sdwa v15, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GFX8-NEXT: v_or_b32_e32 v0, v0, v15 |
| ; GFX8-NEXT: v_or_b32_e32 v1, v1, v14 |
| ; GFX8-NEXT: v_or_b32_e32 v2, v2, v13 |
| ; GFX8-NEXT: v_or_b32_e32 v3, v3, v12 |
| ; GFX8-NEXT: v_or_b32_e32 v4, v4, v11 |
| ; GFX8-NEXT: v_or_b32_e32 v5, v5, v10 |
| ; GFX8-NEXT: v_or_b32_e32 v6, v6, v9 |
| ; GFX8-NEXT: v_or_b32_e32 v7, v7, v8 |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v16f16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s3, 0 |
| ; GFX9-NEXT: s_mov_b32 s1, s3 |
| ; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x40 |
| ; GFX9-NEXT: s_load_dwordx8 s[12:19], s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s4 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s5 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, s6 |
| ; GFX9-NEXT: v_mov_b32_e32 v3, s7 |
| ; GFX9-NEXT: v_mov_b32_e32 v4, s8 |
| ; GFX9-NEXT: v_mov_b32_e32 v5, s9 |
| ; GFX9-NEXT: v_mov_b32_e32 v6, s10 |
| ; GFX9-NEXT: v_mov_b32_e32 v7, s11 |
| ; GFX9-NEXT: v_pk_add_f16 v0, s12, v0 |
| ; GFX9-NEXT: v_pk_add_f16 v1, s13, v1 |
| ; GFX9-NEXT: v_pk_add_f16 v2, s14, v2 |
| ; GFX9-NEXT: v_pk_add_f16 v3, s15, v3 |
| ; GFX9-NEXT: v_pk_add_f16 v4, s16, v4 |
| ; GFX9-NEXT: v_pk_add_f16 v5, s17, v5 |
| ; GFX9-NEXT: v_pk_add_f16 v6, s18, v6 |
| ; GFX9-NEXT: v_pk_add_f16 v7, s19, v7 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <16 x half>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <16 x half>, ptr addrspace(6) %p0 |
| %r1 = load <16 x half>, ptr addrspace(6) %gep1 |
| %r = fadd <16 x half> %r0, %r1 |
| ret <16 x half> %r |
| } |
| |
; NOTE(review): scalar bfloat test intentionally left disabled — presumably
; scalar bf16 return/codegen is not yet supported on these targets; confirm
; before re-enabling and regenerating checks.
; define amdgpu_vs bfloat @load_bf16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
| ; %gep1 = getelementptr inbounds bfloat, ptr addrspace(6) %p1, i32 2 |
| ; %r0 = load bfloat, ptr addrspace(6) %p0 |
| ; %r1 = load bfloat, ptr addrspace(6) %gep1 |
| ; %r = fadd bfloat %r0, %r1 |
| ; ret bfloat %r |
| ; } |
| |
| define amdgpu_vs <2 x bfloat> @load_v2bf16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 { |
| ; GFX67-LABEL: load_v2bf16: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s2, s1 |
| ; GFX67-NEXT: s_mov_b32 s3, 0 |
| ; GFX67-NEXT: s_mov_b32 s1, s3 |
| ; GFX67-NEXT: s_load_dword s2, s[2:3], 0x2 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: s_lshl_b32 s1, s2, 16 |
| ; GFX67-NEXT: v_mov_b32_e32 v0, s1 |
| ; GFX67-NEXT: s_and_b32 s1, s2, 0xffff0000 |
| ; GFX67-NEXT: s_lshl_b32 s3, s0, 16 |
| ; GFX67-NEXT: s_and_b32 s0, s0, 0xffff0000 |
| ; GFX67-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX67-NEXT: v_add_f32_e32 v1, s0, v1 |
| ; GFX67-NEXT: v_add_f32_e32 v0, s3, v0 |
| ; GFX67-NEXT: v_lshrrev_b32_e32 v1, 16, v1 |
| ; GFX67-NEXT: v_lshr_b64 v[0:1], v[0:1], 16 |
| ; GFX67-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_v2bf16: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s2, s1 |
| ; GFX8-NEXT: s_mov_b32 s1, 0 |
| ; GFX8-NEXT: s_mov_b32 s3, s1 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX8-NEXT: s_load_dword s1, s[2:3], 0x8 |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: s_lshl_b32 s2, s0, 16 |
| ; GFX8-NEXT: s_lshl_b32 s3, s1, 16 |
| ; GFX8-NEXT: v_mov_b32_e32 v0, s3 |
| ; GFX8-NEXT: v_add_f32_e32 v0, s2, v0 |
| ; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 |
| ; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 |
| ; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 |
| ; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0 |
| ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 |
| ; GFX8-NEXT: s_and_b32 s1, s1, 0xffff0000 |
| ; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc |
| ; GFX8-NEXT: s_and_b32 s0, s0, 0xffff0000 |
| ; GFX8-NEXT: v_mov_b32_e32 v1, s1 |
| ; GFX8-NEXT: v_add_f32_e32 v1, s0, v1 |
| ; GFX8-NEXT: v_bfe_u32 v2, v1, 16, 1 |
| ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v1 |
| ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2 |
| ; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v1 |
| ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 |
| ; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc |
| ; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 |
| ; GFX8-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1] |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_v2bf16: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_mov_b32 s2, s1 |
| ; GFX9-NEXT: s_mov_b32 s1, 0 |
| ; GFX9-NEXT: s_mov_b32 s3, s1 |
| ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 |
| ; GFX9-NEXT: s_load_dword s5, s[2:3], 0x8 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: s_and_b32 s0, s4, 0xffff0000 |
| ; GFX9-NEXT: s_and_b32 s1, s5, 0xffff0000 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, s1 |
| ; GFX9-NEXT: v_add_f32_e32 v0, s0, v0 |
| ; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1 |
| ; GFX9-NEXT: v_add_u32_e32 v1, v1, v0 |
| ; GFX9-NEXT: v_add_u32_e32 v1, 0x7fff, v1 |
| ; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0 |
| ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 |
| ; GFX9-NEXT: s_lshl_b32 s0, s5, 16 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc |
| ; GFX9-NEXT: s_lshl_b32 s1, s4, 16 |
| ; GFX9-NEXT: v_mov_b32_e32 v1, s0 |
| ; GFX9-NEXT: v_add_f32_e32 v1, s1, v1 |
| ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 |
| ; GFX9-NEXT: v_add_u32_e32 v2, v2, v1 |
| ; GFX9-NEXT: v_add_u32_e32 v2, 0x7fff, v2 |
| ; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1 |
| ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc |
| ; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff |
| ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 |
| ; GFX9-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v1 |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds <2 x bfloat>, ptr addrspace(6) %p1, i32 2 |
| %r0 = load <2 x bfloat>, ptr addrspace(6) %p0 |
| %r1 = load <2 x bfloat>, ptr addrspace(6) %gep1 |
| %r = fadd <2 x bfloat> %r0, %r1 |
| ret <2 x bfloat> %r |
| } |
| |
| define amdgpu_vs i32 @load_i32_max_gfx6_offset(ptr addrspace(6) inreg %ptr) #0 { |
| ; GFX67-LABEL: load_i32_max_gfx6_offset: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_mov_b32 s1, 0 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0xff |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_max_gfx6_offset: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s1, 0 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x3fc |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds i32, ptr addrspace(6) %ptr, i32 255 |
| %ld = load i32, ptr addrspace(6) %gep1 |
| ret i32 %ld |
| } |
| |
| define amdgpu_vs i32 @load_i32_max_gfx6_offset_p1(ptr addrspace(6) inreg %ptr) #0 { |
| ; GFX67-LABEL: load_i32_max_gfx6_offset_p1: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_addk_i32 s0, 0x400 |
| ; GFX67-NEXT: s_mov_b32 s1, 0 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_max_gfx6_offset_p1: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_mov_b32 s1, 0 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x400 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds i32, ptr addrspace(6) %ptr, i32 256 |
| %ld = load i32, ptr addrspace(6) %gep1 |
| ret i32 %ld |
| } |
| |
| define amdgpu_vs i32 @load_i32_max_gfx8_offset(ptr addrspace(6) inreg %ptr) #0 { |
| ; GFX67-LABEL: load_i32_max_gfx8_offset: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_add_i32 s0, s0, 0xfffff |
| ; GFX67-NEXT: s_mov_b32 s1, 0 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX8-LABEL: load_i32_max_gfx8_offset: |
| ; GFX8: ; %bb.0: |
| ; GFX8-NEXT: s_mov_b32 s1, 0 |
| ; GFX8-NEXT: s_load_dword s0, s[0:1], 0xfffff |
| ; GFX8-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX8-NEXT: ; return to shader part epilog |
| ; |
| ; GFX9-LABEL: load_i32_max_gfx8_offset: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_add_i32 s0, s0, 0xfffff |
| ; GFX9-NEXT: s_mov_b32 s1, 0 |
| ; GFX9-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX9-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX9-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds i8, ptr addrspace(6) %ptr, i32 1048575 |
| %ld = load i32, ptr addrspace(6) %gep1 |
| ret i32 %ld |
| } |
| |
| define amdgpu_vs i32 @load_i32_max_gfx8_offset_p1(ptr addrspace(6) inreg %ptr) #0 { |
| ; GFX67-LABEL: load_i32_max_gfx8_offset_p1: |
| ; GFX67: ; %bb.0: |
| ; GFX67-NEXT: s_add_i32 s0, s0, 0x100000 |
| ; GFX67-NEXT: s_mov_b32 s1, 0 |
| ; GFX67-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX67-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX67-NEXT: ; return to shader part epilog |
| ; |
| ; GFX89-LABEL: load_i32_max_gfx8_offset_p1: |
| ; GFX89: ; %bb.0: |
| ; GFX89-NEXT: s_add_i32 s0, s0, 0x100000 |
| ; GFX89-NEXT: s_mov_b32 s1, 0 |
| ; GFX89-NEXT: s_load_dword s0, s[0:1], 0x0 |
| ; GFX89-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX89-NEXT: ; return to shader part epilog |
| %gep1 = getelementptr inbounds i8, ptr addrspace(6) %ptr, i32 1048576 |
| %ld = load i32, ptr addrspace(6) %gep1 |
| ret i32 %ld |
| } |
| |
| declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #6 |
| declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) #7 |
| declare float @llvm.amdgcn.struct.ptr.buffer.load.format.f32(ptr addrspace(8), i32, i32, i32, i32) #7 |
| |
| !0 = !{} |
| |
| attributes #0 = { nounwind } |
| attributes #1 = { nounwind "amdgpu-32bit-address-high-bits"="0" } |
| attributes #2 = { nounwind "amdgpu-32bit-address-high-bits"="1" } |
| attributes #3 = { nounwind "amdgpu-32bit-address-high-bits"="0xffff8000" } |
| attributes #4 = { nounwind "amdgpu-32bit-address-high-bits"="0xfffffff0" } |
| attributes #5 = { "InitialPSInputAddr"="45175" } |
| attributes #6 = { nounwind readnone speculatable } |
| attributes #7 = { nounwind memory(argmem: read) } |
| attributes #8 = { nounwind readnone } |