; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
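;
; llvm.amdgcn.sched.group.barrier(i32 %mask, i32 %size, i32 %syncid) forms a
; scheduling group of %size instructions selected by %mask; groups that share
; a SyncID are ordered relative to one another in the order the barriers
; appear. As a summary of the mask encoding (shared with sched_barrier, per
; the intrinsic documentation in IntrinsicsAMDGPU.td): 0x1 = non-memory ALU,
; 0x2 = VALU, 0x4 = SALU, 0x8 = MFMA/WMMA, 0x10 = all VMEM, 0x20 = VMEM read,
; 0x40 = VMEM write, 0x80 = all DS, 0x100 = DS read, 0x200 = DS write.
;
; The first kernel only checks that each barrier is emitted verbatim as an
; assembler comment in the output.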
define amdgpu_kernel void @test_sched_group_barrier() #0 {
; GCN-LABEL: test_sched_group_barrier:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: ; sched_group_barrier mask(0x00000000) size(1) SyncID(2)
; GCN-NEXT: ; sched_group_barrier mask(0x00000001) size(2) SyncID(4)
; GCN-NEXT: ; sched_group_barrier mask(0x00000004) size(8) SyncID(16)
; GCN-NEXT: ; sched_group_barrier mask(0x0000000F) size(10000) SyncID(-1)
; GCN-NEXT: s_endpgm
entry:
call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 1, i32 2) #1
call void @llvm.amdgcn.sched.group.barrier(i32 1, i32 2, i32 4) #1
call void @llvm.amdgcn.sched.group.barrier(i32 4, i32 8, i32 16) #1
call void @llvm.amdgcn.sched.group.barrier(i32 15, i32 10000, i32 -1) #1
ret void
}
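
; Build a simple three-stage pipeline in sync group 0: the eight VMEM reads
; first, then the VALU multiplies, then the eight VMEM writes, as requested
; by the sched_group_barrier calls at the end of the kernel.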
define amdgpu_kernel void @test_sched_group_barrier_simple_pipeline(<32 x i32> addrspace(1)* noalias %in, <32 x i32> addrspace(1)* noalias %out) {
; GCN-LABEL: test_sched_group_barrier_simple_pipeline:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN-NEXT: v_lshlrev_b32_e32 v32, 7, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1]
; GCN-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1] offset:16
; GCN-NEXT: global_load_dwordx4 v[8:11], v32, s[0:1] offset:32
; GCN-NEXT: global_load_dwordx4 v[12:15], v32, s[0:1] offset:48
; GCN-NEXT: global_load_dwordx4 v[16:19], v32, s[0:1] offset:64
; GCN-NEXT: global_load_dwordx4 v[20:23], v32, s[0:1] offset:80
; GCN-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:96
; GCN-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:112
; GCN-NEXT: ; sched_group_barrier mask(0x00000020) size(8) SyncID(0)
; GCN-NEXT: s_waitcnt vmcnt(7)
; GCN-NEXT: v_mul_lo_u32 v3, v3, v3
; GCN-NEXT: v_mul_lo_u32 v2, v2, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v0
; GCN-NEXT: s_waitcnt vmcnt(6)
; GCN-NEXT: v_mul_lo_u32 v7, v7, v7
; GCN-NEXT: v_mul_lo_u32 v6, v6, v6
; GCN-NEXT: v_mul_lo_u32 v5, v5, v5
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_lo_u32 v31, v31, v31
; GCN-NEXT: v_mul_lo_u32 v30, v30, v30
; GCN-NEXT: v_mul_lo_u32 v29, v29, v29
; GCN-NEXT: v_mul_lo_u32 v28, v28, v28
; GCN-NEXT: v_mul_lo_u32 v4, v4, v4
; GCN-NEXT: v_mul_lo_u32 v11, v11, v11
; GCN-NEXT: v_mul_lo_u32 v10, v10, v10
; GCN-NEXT: v_mul_lo_u32 v9, v9, v9
; GCN-NEXT: v_mul_lo_u32 v8, v8, v8
; GCN-NEXT: v_mul_lo_u32 v15, v15, v15
; GCN-NEXT: v_mul_lo_u32 v14, v14, v14
; GCN-NEXT: v_mul_lo_u32 v13, v13, v13
; GCN-NEXT: v_mul_lo_u32 v12, v12, v12
; GCN-NEXT: v_mul_lo_u32 v19, v19, v19
; GCN-NEXT: v_mul_lo_u32 v18, v18, v18
; GCN-NEXT: v_mul_lo_u32 v17, v17, v17
; GCN-NEXT: v_mul_lo_u32 v16, v16, v16
; GCN-NEXT: v_mul_lo_u32 v23, v23, v23
; GCN-NEXT: v_mul_lo_u32 v22, v22, v22
; GCN-NEXT: v_mul_lo_u32 v21, v21, v21
; GCN-NEXT: v_mul_lo_u32 v20, v20, v20
; GCN-NEXT: v_mul_lo_u32 v27, v27, v27
; GCN-NEXT: v_mul_lo_u32 v26, v26, v26
; GCN-NEXT: v_mul_lo_u32 v25, v25, v25
; GCN-NEXT: v_mul_lo_u32 v24, v24, v24
; GCN-NEXT: ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
; GCN-NEXT: global_store_dwordx4 v32, v[28:31], s[2:3] offset:112
; GCN-NEXT: global_store_dwordx4 v32, v[24:27], s[2:3] offset:96
; GCN-NEXT: global_store_dwordx4 v32, v[20:23], s[2:3] offset:80
; GCN-NEXT: global_store_dwordx4 v32, v[16:19], s[2:3] offset:64
; GCN-NEXT: global_store_dwordx4 v32, v[12:15], s[2:3] offset:48
; GCN-NEXT: global_store_dwordx4 v32, v[8:11], s[2:3] offset:32
; GCN-NEXT: global_store_dwordx4 v32, v[4:7], s[2:3] offset:16
; GCN-NEXT: global_store_dwordx4 v32, v[0:3], s[2:3]
; GCN-NEXT: ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #2
%gep1 = getelementptr <32 x i32>, <32 x i32> addrspace(1)* %in, i32 %tid
%load = load <32 x i32>, <32 x i32> addrspace(1)* %gep1
%mul = mul <32 x i32> %load, %load
%gep2 = getelementptr <32 x i32>, <32 x i32> addrspace(1)* %out, i32 %tid
store <32 x i32> %mul, <32 x i32> addrspace(1)* %gep2
; VMEM read
call void @llvm.amdgcn.sched.group.barrier(i32 32, i32 8, i32 0)
; VALU
call void @llvm.amdgcn.sched.group.barrier(i32 2, i32 30, i32 0)
; VMEM write
call void @llvm.amdgcn.sched.group.barrier(i32 64, i32 8, i32 0)
ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #2
declare void @llvm.amdgcn.sched.group.barrier(i32, i32, i32) #1

attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }
attributes #2 = { nounwind readnone speculatable }