; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.amdgcn.workitem.id.x() readnone
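; Check that a 64-bit add of two VGPR operands is split into a 32-bit
; add and an add-with-carry.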
define amdgpu_kernel void @test_i64_vreg(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %inA, ptr addrspace(1) noalias %inB) {
; SI-LABEL: test_i64_vreg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s14, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_mov_b64 s[6:7], s[14:15]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[12:13], s[2:3]
; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[12:15], 0 addr64
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr i64, ptr addrspace(1) %inA, i32 %tid
%b_ptr = getelementptr i64, ptr addrspace(1) %inB, i32 %tid
%a = load i64, ptr addrspace(1) %a_ptr
%b = load i64, ptr addrspace(1) %b_ptr
%result = add i64 %a, %b
store i64 %result, ptr addrspace(1) %out
ret void
}
; Check that the SGPR add operand is correctly moved to a VGPR.
define amdgpu_kernel void @sgpr_operand(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in, ptr addrspace(1) noalias %in_bar, i64 %a) {
; SI-LABEL: sgpr_operand:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xf
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s6, s4
; SI-NEXT: s_addc_u32 s5, s7, s5
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
%foo = load i64, ptr addrspace(1) %in, align 8
%result = add i64 %foo, %a
store i64 %result, ptr addrspace(1) %out
ret void
}
; Swap the arguments. Check that the SGPR -> VGPR copy works with the
; SGPR as the other operand.
define amdgpu_kernel void @sgpr_operand_reversed(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in, i64 %a) {
; SI-LABEL: sgpr_operand_reversed:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s4, s6
; SI-NEXT: s_addc_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
%foo = load i64, ptr addrspace(1) %in, align 8
%result = add i64 %a, %foo
store i64 %result, ptr addrspace(1) %out
ret void
}
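; Check that a <2 x i64> add of SGPR operands is done with scalar
; add/addc pairs, one per element.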
define amdgpu_kernel void @test_v2i64_sreg(ptr addrspace(1) noalias %out, <2 x i64> %a, <2 x i64> %b) {
; SI-LABEL: test_v2i64_sreg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0xd
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s10, s14
; SI-NEXT: s_addc_u32 s5, s11, s15
; SI-NEXT: s_add_u32 s6, s8, s12
; SI-NEXT: s_addc_u32 s7, s9, s13
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = add <2 x i64> %a, %b
store <2 x i64> %result, ptr addrspace(1) %out
ret void
}
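; Check that a <2 x i64> add of VGPR operands is split into per-element
; 32-bit add/addc pairs.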
define amdgpu_kernel void @test_v2i64_vreg(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %inA, ptr addrspace(1) noalias %inB) {
; SI-LABEL: test_v2i64_vreg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s14, 0
; SI-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; SI-NEXT: v_mov_b32_e32 v5, 0
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_mov_b64 s[6:7], s[14:15]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[12:13], s[2:3]
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[4:5], s[12:15], 0 addr64
; SI-NEXT: buffer_load_dwordx4 v[4:7], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v2, vcc, v2, v6
; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <2 x i64>, ptr addrspace(1) %inA, i32 %tid
%b_ptr = getelementptr <2 x i64>, ptr addrspace(1) %inB, i32 %tid
%a = load <2 x i64>, ptr addrspace(1) %a_ptr
%b = load <2 x i64>, ptr addrspace(1) %b_ptr
%result = add <2 x i64> %a, %b
store <2 x i64> %result, ptr addrspace(1) %out
ret void
}
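; Check that an i64 add whose result is only used truncated to i32 is
; narrowed to a single 32-bit add.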
define amdgpu_kernel void @trunc_i64_add_to_i32(ptr addrspace(1) %out, i32, i64 %a, i32, i64 %b) {
; SI-LABEL: trunc_i64_add_to_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s2, s[4:5], 0xd
; SI-NEXT: s_load_dword s6, s[4:5], 0x11
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s6, s2
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%add = add i64 %b, %a
%trunc = trunc i64 %add to i32
store i32 %trunc, ptr addrspace(1) %out, align 8
ret void
}