; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
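
; Pattern under test: (trunc (lshr i64 X, (zext (and i32 Y, 31)))), a candidate for
; v_alignbit_b32. Here the shift amount is a kernel argument (uniform, SGPR), so the
; checks expect an s_and_b32 with 31 followed by a 64-bit shift rather than v_alignbit_b32.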
define amdgpu_kernel void @alignbit_shr_pat(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) {
; GCN-LABEL: alignbit_shr_pat:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dword s8, s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_and_b32 s0, s8, 31
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = load i64, ptr addrspace(1) %arg, align 8
%tmp3 = and i32 %arg2, 31
%tmp4 = zext i32 %tmp3 to i64
%tmp5 = lshr i64 %tmp, %tmp4
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %arg1, align 4
ret void
}
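
; Same pattern with per-lane loads: the shift amount is divergent (VGPR), and the
; checks expect it to be selected as v_alignbit_b32.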
define amdgpu_kernel void @alignbit_shr_pat_v(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; GCN-LABEL: alignbit_shr_pat_v:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[4:5], s[0:1]
; GCN-NEXT: buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
; GCN-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
; GCN-NEXT: buffer_load_dword v0, v[1:2], s[4:7], 0 addr64
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_alignbit_b32 v0, v4, v3, v0
; GCN-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64
; GCN-NEXT: s_endpgm
bb:
%tid = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep1 = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %tid
%tmp = load i64, ptr addrspace(1) %gep1, align 8
%gep2 = getelementptr inbounds i32, ptr addrspace(1) %arg1, i32 %tid
%amt = load i32, ptr addrspace(1) %gep2, align 4
%tmp3 = and i32 %amt, 31
%tmp4 = zext i32 %tmp3 to i64
%tmp5 = lshr i64 %tmp, %tmp4
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %gep2, align 4
ret void
}
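
; Shift amount masked with 30 instead of 31: the mask must be preserved
; (s_and_b32 ... 30) and a plain 64-bit shift is emitted.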
define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) {
; GCN-LABEL: alignbit_shr_pat_wrong_and30:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dword s8, s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_and_b32 s0, s8, 30
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = load i64, ptr addrspace(1) %arg, align 8
%tmp3 = and i32 %arg2, 30
%tmp4 = zext i32 %tmp3 to i64
%tmp5 = lshr i64 %tmp, %tmp4
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %arg1, align 4
ret void
}
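
; Shift amount masked with 63: amounts >= 32 are possible, so this must not match the
; 32-bit alignbit pattern; the mask is redundant for a 64-bit shift and is dropped.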
define amdgpu_kernel void @alignbit_shr_pat_wrong_and63(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) {
; GCN-LABEL: alignbit_shr_pat_wrong_and63:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dword s8, s[4:5], 0xd
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s8
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = load i64, ptr addrspace(1) %arg, align 8
%tmp3 = and i32 %arg2, 63
%tmp4 = zext i32 %tmp3 to i64
%tmp5 = lshr i64 %tmp, %tmp4
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %arg1, align 4
ret void
}
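
; Constant shift amount of 30: lowered to a 64-bit shift by an immediate.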
define amdgpu_kernel void @alignbit_shr_pat_const30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; GCN-LABEL: alignbit_shr_pat_const30:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], 30
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = load i64, ptr addrspace(1) %arg, align 8
%tmp5 = lshr i64 %tmp, 30
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %arg1, align 4
ret void
}
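
; Constant shift amount of 33 (>= 32): only the high dword contributes, so the checks
; expect a single 32-bit load (offset:4) and a 32-bit right shift by 1.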
define amdgpu_kernel void @alignbit_shr_pat_wrong_const33(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; GCN-LABEL: alignbit_shr_pat_wrong_const33:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_mov_b32 s2, s6
; GCN-NEXT: s_mov_b32 s3, s7
; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 offset:4
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshrrev_b32_e32 v0, 1, v0
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = load i64, ptr addrspace(1) %arg, align 8
%tmp5 = lshr i64 %tmp, 33
%tmp6 = trunc i64 %tmp5 to i32
store i32 %tmp6, ptr addrspace(1) %arg1, align 4
ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0
attributes #0 = { nounwind readnone speculatable }