; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn -mcpu=gfx900 -amdgpu-aa -amdgpu-aa-wrapper -amdgpu-annotate-uniform -S < %s | FileCheck %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -check-prefix=GCN %s

; Check that a barrier or fence between loads is not considered a clobber
; for the purpose of converting vector loads into scalar loads.

@LDS = linkonce_odr hidden local_unnamed_addr addrspace(3) global i32 poison
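
; Barriers and fences only between the two loads: both loads keep
; !amdgpu.noclobber and are selected as scalar (s_load) loads.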
define amdgpu_kernel void @simple_barrier(ptr addrspace(1) %arg) {
; CHECK-LABEL: @simple_barrier(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: tail call void @llvm.amdgcn.wave.barrier()
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: simple_barrier:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_load_dword s2, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: ; wave barrier
; GCN-NEXT: s_load_dword s3, s[0:1], 0x4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_add_i32 s2, s3, s2
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:8
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
tail call void @llvm.amdgcn.wave.barrier()
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
store i32 %i3, ptr addrspace(1) %i4, align 4
ret void
}
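
; Neither branch writes memory (only a barrier and a fence), so the load in
; if.end keeps !amdgpu.noclobber and stays a scalar load.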
define amdgpu_kernel void @memory_phi_no_clobber(ptr addrspace(1) %arg, i1 %cond) {
; CHECK-LABEL: @memory_phi_no_clobber(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]], !amdgpu.uniform !0
; CHECK: if.then:
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: br label [[IF_END:%.*]], !amdgpu.uniform !0
; CHECK: if.else:
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: br label [[IF_END]], !amdgpu.uniform !0
; CHECK: if.end:
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: memory_phi_no_clobber:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: s_load_dword s2, s[4:5], 0x2c
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp0_b32 s2, 0
; GCN-NEXT: s_load_dword s4, s[0:1], 0x0
; GCN-NEXT: s_mov_b64 s[2:3], -1
; GCN-NEXT: s_cbranch_scc0 .LBB1_2
; GCN-NEXT: ; %bb.1: ; %if.else
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[2:3], 0
; GCN-NEXT: .LBB1_2: ; %Flow
; GCN-NEXT: s_andn2_b64 vcc, exec, s[2:3]
; GCN-NEXT: s_cbranch_vccnz .LBB1_4
; GCN-NEXT: ; %bb.3: ; %if.then
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: .LBB1_4: ; %if.end
; GCN-NEXT: s_load_dword s2, s[0:1], 0x4
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_add_i32 s2, s2, s4
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:8
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br i1 %cond, label %if.then, label %if.else
if.then:
tail call void @llvm.amdgcn.s.barrier()
br label %if.end
if.else:
fence syncscope("workgroup") release
br label %if.end
if.end:
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
store i32 %i3, ptr addrspace(1) %i4, align 4
ret void
}
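
; The store in if.then may clobber the load in if.end: no !amdgpu.noclobber,
; and a vector (global_load) load is selected.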
define amdgpu_kernel void @memory_phi_clobber1(ptr addrspace(1) %arg, i1 %cond) {
; CHECK-LABEL: @memory_phi_clobber1(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]], !amdgpu.uniform !0
; CHECK: if.then:
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 3
; CHECK-NEXT: store i32 1, ptr addrspace(1) [[GEP]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]], !amdgpu.uniform !0
; CHECK: if.else:
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: br label [[IF_END]], !amdgpu.uniform !0
; CHECK: if.end:
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: memory_phi_clobber1:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: s_load_dword s2, s[4:5], 0x2c
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp0_b32 s2, 0
; GCN-NEXT: s_load_dword s4, s[0:1], 0x0
; GCN-NEXT: s_mov_b64 s[2:3], -1
; GCN-NEXT: s_cbranch_scc0 .LBB2_2
; GCN-NEXT: ; %bb.1: ; %if.else
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_mov_b64 s[2:3], 0
; GCN-NEXT: .LBB2_2: ; %Flow
; GCN-NEXT: s_andn2_b64 vcc, exec, s[2:3]
; GCN-NEXT: s_cbranch_vccnz .LBB2_4
; GCN-NEXT: ; %bb.3: ; %if.then
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, 1
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:12
; GCN-NEXT: .LBB2_4: ; %if.end
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: global_load_dword v1, v0, s[0:1] offset:4
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s4, v1
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:8
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br i1 %cond, label %if.then, label %if.else
if.then:
%gep = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 3
store i32 1, ptr addrspace(1) %gep, align 4
br label %if.end
if.else:
tail call void @llvm.amdgcn.s.barrier()
br label %if.end
if.end:
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
store i32 %i3, ptr addrspace(1) %i4, align 4
ret void
}
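
; Same as above with the store on the else path: the load in if.end is
; clobbered and selected as a vector load.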
define amdgpu_kernel void @memory_phi_clobber2(ptr addrspace(1) %arg, i1 %cond) {
; CHECK-LABEL: @memory_phi_clobber2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]], !amdgpu.uniform !0
; CHECK: if.then:
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: br label [[IF_END:%.*]], !amdgpu.uniform !0
; CHECK: if.else:
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 3
; CHECK-NEXT: store i32 1, ptr addrspace(1) [[GEP]], align 4
; CHECK-NEXT: br label [[IF_END]], !amdgpu.uniform !0
; CHECK: if.end:
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: memory_phi_clobber2:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s2, s[4:5], 0x2c
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp0_b32 s2, 0
; GCN-NEXT: s_mov_b64 s[2:3], -1
; GCN-NEXT: s_cbranch_scc0 .LBB3_2
; GCN-NEXT: ; %bb.1: ; %if.else
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, 1
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:12
; GCN-NEXT: s_mov_b64 s[2:3], 0
; GCN-NEXT: .LBB3_2: ; %Flow
; GCN-NEXT: s_load_dword s4, s[0:1], 0x0
; GCN-NEXT: s_andn2_b64 vcc, exec, s[2:3]
; GCN-NEXT: s_cbranch_vccnz .LBB3_4
; GCN-NEXT: ; %bb.3: ; %if.then
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: .LBB3_4: ; %if.end
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: global_load_dword v1, v0, s[0:1] offset:4
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s4, v1
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:8
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br i1 %cond, label %if.then, label %if.else
if.then:
tail call void @llvm.amdgcn.s.barrier()
br label %if.end
if.else:
%gep = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 3
store i32 1, ptr addrspace(1) %gep, align 4
br label %if.end
if.end:
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
store i32 %i3, ptr addrspace(1) %i4, align 4
ret void
}
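
; The store inside the loop is to a different constant offset of the same
; pointer, so the load keeps !amdgpu.noclobber and remains a scalar load.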
define amdgpu_kernel void @no_clobbering_loop1(ptr addrspace(1) %arg, i1 %cc) {
; CHECK-LABEL: @no_clobbering_loop1(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br label [[WHILE_COND:%.*]], !amdgpu.uniform !0
; CHECK: while.cond:
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: tail call void @llvm.amdgcn.wave.barrier()
; CHECK-NEXT: br i1 [[CC:%.*]], label [[WHILE_COND]], label [[END:%.*]], !amdgpu.uniform !0
; CHECK: end:
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_clobbering_loop1:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s0, s[4:5], 0x2c
; GCN-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_load_dword s4, s[2:3], 0x0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
; GCN-NEXT: .LBB4_1: ; %while.cond
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_load_dword s5, s[2:3], 0x4
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_add_i32 s5, s5, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: global_store_dword v0, v1, s[2:3] offset:8
; GCN-NEXT: ; wave barrier
; GCN-NEXT: s_cbranch_vccnz .LBB4_1
; GCN-NEXT: ; %bb.2: ; %end
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br label %while.cond
while.cond:
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
store i32 %i3, ptr addrspace(1) %i4, align 4
tail call void @llvm.amdgcn.wave.barrier()
br i1 %cc, label %while.cond, label %end
end:
ret void
}
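
; No stores inside the loop (the only store is to the noalias %out after the
; loop), so the indexed load keeps !amdgpu.noclobber and remains a scalar load.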
define amdgpu_kernel void @no_clobbering_loop2(ptr addrspace(1) noalias %arg, ptr addrspace(1) noalias %out, i32 %n) {
; CHECK-LABEL: @no_clobbering_loop2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br label [[WHILE_COND:%.*]], !amdgpu.uniform !0
; CHECK: while.cond:
; CHECK-NEXT: [[C:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ [[INC:%.*]], [[WHILE_COND]] ]
; CHECK-NEXT: [[ACC:%.*]] = phi i32 [ [[I]], [[BB]] ], [ [[I3:%.*]], [[WHILE_COND]] ]
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i32 [[C]], !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[I3]] = add i32 [[I2]], [[ACC]]
; CHECK-NEXT: tail call void @llvm.amdgcn.wave.barrier()
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[C]], 1
; CHECK-NEXT: [[CC:%.*]] = icmp eq i32 [[INC]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CC]], label [[WHILE_COND]], label [[END:%.*]], !amdgpu.uniform !0
; CHECK: end:
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_clobbering_loop2:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_load_dword s4, s[0:1], 0x0
; GCN-NEXT: .LBB5_1: ; %while.cond
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_load_dword s5, s[0:1], 0x0
; GCN-NEXT: ; wave barrier
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_add_i32 s4, s5, s4
; GCN-NEXT: s_add_u32 s0, s0, 4
; GCN-NEXT: s_addc_u32 s1, s1, 0
; GCN-NEXT: s_add_i32 s6, s6, -1
; GCN-NEXT: s_cmp_eq_u32 s6, 0
; GCN-NEXT: s_cbranch_scc1 .LBB5_1
; GCN-NEXT: ; %bb.2: ; %end
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: global_store_dword v0, v1, s[2:3]
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br label %while.cond
while.cond:
%c = phi i32 [ 0, %bb ], [ %inc, %while.cond ]
%acc = phi i32 [ %i, %bb ], [ %i3, %while.cond ]
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i32 %c
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %acc
tail call void @llvm.amdgcn.wave.barrier()
%inc = add nuw nsw i32 %c, 1
%cc = icmp eq i32 %inc, %n
br i1 %cc, label %while.cond, label %end
end:
store i32 %i3, ptr addrspace(1) %out, align 4
ret void
}
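
; %arg and %out may alias, so the store inside the loop clobbers the load:
; no !amdgpu.noclobber and a vector load is selected.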
define amdgpu_kernel void @clobbering_loop(ptr addrspace(1) %arg, ptr addrspace(1) %out, i1 %cc) {
; CHECK-LABEL: @clobbering_loop(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: br label [[WHILE_COND:%.*]], !amdgpu.uniform !0
; CHECK: while.cond:
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 1
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: tail call void @llvm.amdgcn.wave.barrier()
; CHECK-NEXT: br i1 [[CC:%.*]], label [[WHILE_COND]], label [[END:%.*]], !amdgpu.uniform !0
; CHECK: end:
; CHECK-NEXT: ret void
;
; GCN-LABEL: clobbering_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s0, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_load_dword s2, s[8:9], 0x0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
; GCN-NEXT: .LBB6_1: ; %while.cond
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: global_load_dword v1, v0, s[8:9] offset:4
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s2, v1
; GCN-NEXT: global_store_dword v0, v1, s[10:11] offset:4
; GCN-NEXT: ; wave barrier
; GCN-NEXT: s_cbranch_vccnz .LBB6_1
; GCN-NEXT: ; %bb.2: ; %end
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
br label %while.cond
while.cond:
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 1
store i32 %i3, ptr addrspace(1) %i4, align 4
tail call void @llvm.amdgcn.wave.barrier()
br i1 %cc, label %while.cond, label %end
end:
ret void
}
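
; A seq_cst atomic load clobbers the load that follows it: the atomic load
; itself keeps !amdgpu.noclobber, but the next load does not and is selected
; as a vector load.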
define amdgpu_kernel void @clobber_by_atomic_load(ptr addrspace(1) %arg) {
; CHECK-LABEL: @clobber_by_atomic_load(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2, !amdgpu.uniform !0
; CHECK-NEXT: [[VAL:%.*]] = load atomic i32, ptr addrspace(1) [[GEP]] seq_cst, align 4, !amdgpu.noclobber !0
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 3, !amdgpu.uniform !0
; CHECK-NEXT: [[I2:%.*]] = load i32, ptr addrspace(1) [[I1]], align 4
; CHECK-NEXT: [[I3:%.*]] = add i32 [[I2]], [[I]]
; CHECK-NEXT: [[I4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 4
; CHECK-NEXT: store i32 [[I3]], ptr addrspace(1) [[I4]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: clobber_by_atomic_load:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_load_dword s2, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dword v1, v0, s[0:1] offset:8 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_wbinvl1_vol
; GCN-NEXT: global_load_dword v1, v0, s[0:1] offset:12
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s2, v1
; GCN-NEXT: global_store_dword v0, v1, s[0:1] offset:16
; GCN-NEXT: s_endpgm
bb:
%i = load i32, ptr addrspace(1) %arg, align 4
%gep = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
%val = load atomic i32, ptr addrspace(1) %gep seq_cst, align 4
%i1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 3
%i2 = load i32, ptr addrspace(1) %i1, align 4
%i3 = add i32 %i2, %i
%i4 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 4
store i32 %i3, ptr addrspace(1) %i4, align 4
ret void
}
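
; A store to LDS cannot alias the global load, so the load keeps
; !amdgpu.noclobber across the barrier and is a scalar load.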
define protected amdgpu_kernel void @no_alias_store(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @no_alias_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 0, ptr addrspace(3) @LDS, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_store:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: global_store_dword v0, v1, s[2:3]
; GCN-NEXT: s_endpgm
entry:
store i32 0, ptr addrspace(3) @LDS, align 4
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
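
; The store to %out may alias %in, so the load is clobbered and selected as a
; vector load.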
define protected amdgpu_kernel void @may_alias_store(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @may_alias_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 0, ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: may_alias_store:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_store_dword v0, v0, s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: global_load_dword v1, v0, s[0:1]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: global_store_dword v0, v1, s[2:3]
; GCN-NEXT: s_endpgm
entry:
store i32 0, ptr addrspace(1) %out, align 4
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
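
; Even a volatile store to LDS does not clobber the global load: it keeps
; !amdgpu.noclobber and is a scalar load.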
define protected amdgpu_kernel void @no_alias_volatile_store(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @no_alias_volatile_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: store volatile i32 0, ptr addrspace(3) @LDS, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_volatile_store:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: global_store_dword v0, v1, s[2:3]
; GCN-NEXT: s_endpgm
entry:
store volatile i32 0, ptr addrspace(3) @LDS, align 4
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
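
; A relaxed (monotonic) atomicrmw on LDS does not clobber the global load.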
define protected amdgpu_kernel void @no_alias_atomic_rmw_relaxed(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @no_alias_atomic_rmw_relaxed(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UNUSED:%.*]] = atomicrmw add ptr addrspace(3) @LDS, i32 5 monotonic, align 4
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_atomic_rmw_relaxed:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 5
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: ds_add_u32 v1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
%unused = atomicrmw add ptr addrspace(3) @LDS, i32 5 monotonic
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
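
; A cmpxchg on LDS does not alias the global load: !amdgpu.noclobber is kept
; across the barrier.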
define protected amdgpu_kernel void @no_alias_atomic_cmpxchg(ptr addrspace(1) %in, ptr addrspace(1) %out, i32 %swap) {
; CHECK-LABEL: @no_alias_atomic_cmpxchg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UNUSED:%.*]] = cmpxchg ptr addrspace(3) @LDS, i32 7, i32 [[SWAP:%.*]] seq_cst monotonic, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_atomic_cmpxchg:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s6
; GCN-NEXT: ds_cmpst_b32 v1, v0, v2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
%unused = cmpxchg ptr addrspace(3) @LDS, i32 7, i32 %swap seq_cst monotonic
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
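
; A seq_cst atomicrmw on LDS does not clobber the global load even with the
; barrier in between.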
define protected amdgpu_kernel void @no_alias_atomic_rmw(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @no_alias_atomic_rmw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UNUSED:%.*]] = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_atomic_rmw:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 5
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: ds_add_u32 v1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
%unused = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
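
; The cmpxchg on %out may alias %in, so the load is clobbered and selected as
; a vector load.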
define protected amdgpu_kernel void @may_alias_atomic_cmpxchg(ptr addrspace(1) %in, ptr addrspace(1) %out, i32 %swap) {
; CHECK-LABEL: @may_alias_atomic_cmpxchg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UNUSED:%.*]] = cmpxchg ptr addrspace(1) [[OUT:%.*]], i32 7, i32 [[SWAP:%.*]] seq_cst monotonic, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: may_alias_atomic_cmpxchg:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: v_mov_b32_e32 v1, 7
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s6
; GCN-NEXT: global_atomic_cmpswap v2, v[0:1], s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_wbinvl1_vol
; GCN-NEXT: s_barrier
; GCN-NEXT: global_load_dword v0, v2, s[0:1]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: global_store_dword v2, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
%unused = cmpxchg ptr addrspace(1) %out, i32 7, i32 %swap seq_cst monotonic
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
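
; The atomicrmw on %out may alias %in, so the load is clobbered and selected
; as a vector load.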
define protected amdgpu_kernel void @may_alias_atomic_rmw(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; CHECK-LABEL: @may_alias_atomic_rmw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UNUSED:%.*]] = atomicrmw add ptr addrspace(1) [[OUT:%.*]], i32 5 syncscope("agent") seq_cst, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: may_alias_atomic_rmw:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_mov_b32_e32 v1, 5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_atomic_add v0, v1, s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_wbinvl1_vol
; GCN-NEXT: s_barrier
; GCN-NEXT: global_load_dword v1, v0, s[0:1]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: global_store_dword v0, v1, s[2:3]
; GCN-NEXT: s_endpgm
entry:
%unused = atomicrmw add ptr addrspace(1) %out, i32 5 syncscope("agent") seq_cst
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
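
; The plain store to %out may alias %in, so the load is clobbered despite the
; non-aliasing LDS atomic and the barrier.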
define protected amdgpu_kernel void @no_alias_atomic_rmw_then_clobber(ptr addrspace(1) %in, ptr addrspace(1) %out, ptr addrspace(1) noalias %noalias) {
; CHECK-LABEL: @no_alias_atomic_rmw_then_clobber(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 1, ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: store i32 2, ptr addrspace(1) [[NOALIAS:%.*]], align 4
; CHECK-NEXT: [[UNUSED:%.*]] = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_atomic_rmw_then_clobber:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: v_mov_b32_e32 v2, 2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: global_store_dword v1, v2, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v0, 5
; GCN-NEXT: ds_add_u32 v1, v0
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: global_load_dword v0, v1, s[0:1]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
store i32 1, ptr addrspace(1) %out, align 4
store i32 2, ptr addrspace(1) %noalias, align 4
%unused = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
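
; Only the noalias pointer is stored to, so the load keeps !amdgpu.noclobber
; and is a scalar load.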
define protected amdgpu_kernel void @no_alias_atomic_rmw_then_no_alias_store(ptr addrspace(1) %in, ptr addrspace(1) %out, ptr addrspace(1) noalias %noalias) {
; CHECK-LABEL: @no_alias_atomic_rmw_then_no_alias_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 2, ptr addrspace(1) [[NOALIAS:%.*]], align 4
; CHECK-NEXT: [[UNUSED:%.*]] = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst, align 4
; CHECK-NEXT: fence syncscope("workgroup") release
; CHECK-NEXT: tail call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: fence syncscope("workgroup") acquire
; CHECK-NEXT: [[LD:%.*]] = load i32, ptr addrspace(1) [[IN:%.*]], align 4, !amdgpu.noclobber !0
; CHECK-NEXT: store i32 [[LD]], ptr addrspace(1) [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
; GCN-LABEL: no_alias_atomic_rmw_then_no_alias_store:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v0, 2
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: v_mov_b32_e32 v2, 5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_store_dword v1, v0, s[6:7]
; GCN-NEXT: ds_add_u32 v1, v2
; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT: s_barrier
; GCN-NEXT: s_load_dword s0, s[0:1], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: global_store_dword v1, v0, s[2:3]
; GCN-NEXT: s_endpgm
entry:
store i32 2, ptr addrspace(1) %noalias, align 4
%unused = atomicrmw add ptr addrspace(3) @LDS, i32 5 seq_cst
fence syncscope("workgroup") release
tail call void @llvm.amdgcn.s.barrier()
fence syncscope("workgroup") acquire
%ld = load i32, ptr addrspace(1) %in, align 4
store i32 %ld, ptr addrspace(1) %out, align 4
ret void
}
declare void @llvm.amdgcn.s.barrier()
declare void @llvm.amdgcn.wave.barrier()