; blob: fe8c90ee7b6864f08e31b00548f2a0799b5f672e [file] [log] [blame]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GCN %s
; Checks that a uniform (SGPR-computed) build_vector — element 0 from a 64-bit
; shift-and-truncate, elements 1-3 zero — is lowered as the s[4:7] resource
; descriptor of a buffer load, and that the loaded value is still masked,
; converted to float, and stored after an intervening sched_barrier.
define amdgpu_kernel void @uniform_build_vector(i64 %in, ptr addrspace(1) %out) {
; GCN-LABEL: uniform_build_vector:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshr_b64 s[4:5], s[0:1], 1
; GCN-NEXT: s_mov_b32 s5, 0
; GCN-NEXT: s_mov_b32 s6, s5
; GCN-NEXT: s_mov_b32 s7, s5
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: ; sched_barrier mask(0x00000000)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
; %trunc holds bits [32:1] of %in (uniform: computed entirely from a kernel
; argument, so it lowers to scalar s_lshr_b64 above).
%shifted = lshr i64 %in, 1
%trunc = trunc i64 %shifted to i32
; Build the v4i32 resource descriptor: lane 0 = %trunc, lanes 1-3 = 0
; (the three s_mov_b32 of 0 in the checked output).
%insert = insertelement <4 x i32> zeroinitializer, i32 %trunc, i64 0
%load = tail call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %insert, i32 0, i32 0, i32 0)
; Mask 0 sched_barrier: no instructions may be scheduled across it, keeping
; the buffer load separated from the consuming ALU ops below.
tail call void @llvm.amdgcn.sched.barrier(i32 0)
%extract = extractelement <4 x i32> %load, i64 0
; %and is 0 or 1, so the sitofp is exact; the backend emits it as
; v_cvt_f32_ubyte0 (see checked output).
%and = and i32 %extract, 1
%convert = sitofp i32 %and to float
store float %convert, ptr addrspace(1) %out
ret void
}
; Declarations for the AMDGPU intrinsics used by the kernel above.
; Function Attrs: convergent nocallback nofree nounwind willreturn
declare void @llvm.amdgcn.sched.barrier(i32 immarg) #0
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(read)
declare <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg) #1