; blob: 92e532b6cf34022dc02266b1fed941d94faf9c33 [file]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -mattr=+unaligned-access-mode < %s | FileCheck %s
; Scalar i32 case: both pointers are uniform (inreg SGPRs), but neither load
; may be selected as a scalar (SMEM) load — %load0 is volatile and %load1 is
; under-aligned (align 1). The checks show the expected fallback: VMEM
; global_load_dword for each, with the results copied back to SGPRs via
; v_readfirstlane before the scalar add. The volatile load carries glc dlc
; and an immediate s_waitcnt vmcnt(0); the align-1 load does not.
define amdgpu_ps void @uniform_load_i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1, ptr addrspace(1) inreg %ptr2) {
; CHECK-LABEL: uniform_load_i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_load_dword v2, v0, s[2:3]
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s1, v2
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: v_mov_b32_e32 v1, s0
; CHECK-NEXT: global_store_dword v0, v1, s[4:5]
; CHECK-NEXT: s_endpgm
  %load0 = load volatile i32, ptr addrspace(1) %ptr0      ; volatile blocks SMEM selection
  %load1 = load i32, ptr addrspace(1) %ptr1, align 1      ; under-aligned; legal only with +unaligned-access-mode
  %sum = add i32 %load0, %load1
  store i32 %sum, ptr addrspace(1) %ptr2
  ret void
}
; Vector <2 x i32> case: a volatile load from a uniform pointer cannot use
; SMEM, so the checks expect a single VMEM global_load_dwordx2 (glc dlc, with
; s_waitcnt vmcnt(0)) followed by one v_readfirstlane per element to return
; the values to SGPRs for the scalar add.
define amdgpu_ps void @uniform_load_v2i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v2i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v2, v0, s[2:3]
; CHECK-NEXT: s_endpgm
  %load = load volatile <2 x i32>, ptr addrspace(1) %ptr0 ; volatile blocks SMEM selection
  %elt0 = extractelement <2 x i32> %load, i32 0
  %elt1 = extractelement <2 x i32> %load, i32 1
  %sum = add i32 %elt0, %elt1
  store i32 %sum, ptr addrspace(1) %ptr1
  ret void
}
; Vector <3 x i32> case: the load is NOT volatile, but it is under-aligned
; (align 2), which still prevents SMEM selection. With +unaligned-access-mode
; (from the RUN line) it lowers to a single VMEM global_load_dwordx3 — note no
; glc dlc here, unlike the volatile tests — then v_readfirstlane per element
; before the chained scalar adds.
define amdgpu_ps void @uniform_load_v3i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v3i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: global_load_dwordx3 v[0:2], v3, s[0:1]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_readfirstlane_b32 s4, v2
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: s_add_i32 s0, s0, s4
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v3, v0, s[2:3]
; CHECK-NEXT: s_endpgm
  %load = load <3 x i32>, ptr addrspace(1) %ptr0, align 2 ; under-aligned; legal only with +unaligned-access-mode
  %elt0 = extractelement <3 x i32> %load, i32 0
  %elt1 = extractelement <3 x i32> %load, i32 1
  %elt2 = extractelement <3 x i32> %load, i32 2
  %sum0 = add i32 %elt0, %elt1
  %sum = add i32 %sum0, %elt2
  store i32 %sum, ptr addrspace(1) %ptr1
  ret void
}
; Vector <4 x i32> case: a volatile load from a uniform pointer cannot use
; SMEM, so the checks expect one VMEM global_load_dwordx4 (glc dlc, with
; s_waitcnt vmcnt(0)) and four v_readfirstlane copies back to SGPRs feeding
; the chained scalar adds.
define amdgpu_ps void @uniform_load_v4i32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
; CHECK-LABEL: uniform_load_v4i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: v_mov_b32_e32 v4, 0
; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_readfirstlane_b32 s4, v2
; CHECK-NEXT: v_readfirstlane_b32 s5, v3
; CHECK-NEXT: s_add_i32 s0, s0, s1
; CHECK-NEXT: s_add_i32 s0, s0, s4
; CHECK-NEXT: s_add_i32 s0, s0, s5
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v4, v0, s[2:3]
; CHECK-NEXT: s_endpgm
  %load = load volatile <4 x i32>, ptr addrspace(1) %ptr0 ; volatile blocks SMEM selection
  %elt0 = extractelement <4 x i32> %load, i32 0
  %elt1 = extractelement <4 x i32> %load, i32 1
  %elt2 = extractelement <4 x i32> %load, i32 2
  %elt3 = extractelement <4 x i32> %load, i32 3
  %sum0 = add i32 %elt0, %elt1
  %sum1 = add i32 %sum0, %elt2
  %sum = add i32 %sum1, %elt3
  store i32 %sum, ptr addrspace(1) %ptr1
  ret void
}