| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -check-prefixes=CHECK,GFX90A %s |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -check-prefixes=CHECK,GFX950 %s |
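| ; Check how the data input and result of global atomicrmw interact with the |
| ; inline asm constraints "a" (AGPR), "v" (VGPR), and "^VA" (AGPR or VGPR), |
| ; i.e. where v_accvgpr_read_b32/v_accvgpr_write_b32 copies are inserted |
| ; around the atomic instruction. |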
| |
| ;--------------------------------------------------------------------- |
| ; xchg i32 cases |
| ;--------------------------------------------------------------------- |
| |
| ; Input and result use AGPR |
| define void @global_atomic_xchg_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xchg_i32_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xchg_i32_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xchg_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as v |
| define void @global_atomic_xchg_i32_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as a |
| define void @global_atomic_xchg_i32_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xchg_i32_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xchg_i32_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
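| ; High register pressure case: the asm defs of v[0:31] and v[32:63] occupy the |
| ; VGPRs, so the clobbered callee-saved VGPRs and the AV operands are shuffled |
| ; through AGPR copies and stack spills/reloads. |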
| define void @global_atomic_xchg_i32_ret_av_av_no_agprs(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_ret_av_av_no_agprs: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a3, v40 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a4, v41 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a5, v42 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a6, v43 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a7, v44 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a8, v45 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a9, v46 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a10, v47 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a11, v56 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a12, v57 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a13, v58 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a14, v59 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a15, v60 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a16, v61 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a17, v62 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a18, v63 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: s_nop 0 |
| ; GFX90A-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a19, v31 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a31, v19 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a30, v20 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a29, v21 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a28, v22 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a27, v23 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a26, v24 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a25, v25 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a24, v26 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a23, v27 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a22, v28 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a21, v29 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a20, v30 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v19, a31 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v20, a30 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v21, a29 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v22, a28 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v23, a27 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v24, a26 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v25, a25 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v26, a24 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v27, a23 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v28, a22 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v29, a21 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v30, a20 ; Reload Reuse |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v31, a19 ; Reload Reuse |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v63, a18 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v62, a17 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v61, a16 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v60, a15 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v59, a14 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v58, a13 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v57, a12 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v56, a11 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v47, a10 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v46, a9 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v45, a8 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v44, a7 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v43, a6 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v42, a5 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v41, a4 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v40, a3 ; Reload Reuse |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_ret_av_av_no_agprs: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a3, v40 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a4, v41 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a5, v42 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a6, v43 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a7, v44 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a8, v45 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a9, v46 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a10, v47 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a11, v56 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a12, v57 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a13, v58 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a14, v59 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a15, v60 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a16, v61 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a17, v62 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a18, v63 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[0:3], s32 ; 16-byte Folded Spill |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[4:7], s32 offset:16 ; 16-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[8:11], s32 offset:32 ; 16-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[12:15], s32 offset:48 ; 16-byte Folded Spill |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_accvgpr_write_b32 a19, v31 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a20, v30 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a21, v29 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a22, v28 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: scratch_store_dwordx3 off, v[16:18], s32 offset:64 ; 12-byte Folded Spill |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:40 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a31, v19 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a27, v23 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a28, v22 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a29, v21 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a30, v20 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a23, v27 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a24, v26 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a25, v25 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a26, v24 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: scratch_load_dwordx4 v[0:3], off, s32 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[4:7], off, s32 offset:16 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[8:11], off, s32 offset:32 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[12:15], off, s32 offset:48 ; 16-byte Folded Reload |
| ; GFX950-NEXT: v_accvgpr_read_b32 v19, a31 ; Reload Reuse |
| ; GFX950-NEXT: scratch_load_dwordx3 v[16:18], off, s32 offset:64 ; 12-byte Folded Reload |
| ; GFX950-NEXT: v_accvgpr_read_b32 v23, a27 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v22, a28 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v21, a29 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v20, a30 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v27, a23 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v26, a24 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v25, a25 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v24, a26 ; Reload Reuse |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_read_b32 v31, a19 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v30, a20 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v29, a21 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v28, a22 ; Reload Reuse |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_accvgpr_read_b32 v63, a18 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v62, a17 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v61, a16 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v60, a15 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v59, a14 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v58, a13 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v57, a12 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v56, a11 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v47, a10 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v46, a9 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v45, a8 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v44, a7 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v43, a6 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v42, a5 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v41, a4 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v40, a3 ; Reload Reuse |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %vgpr.def = call { <32 x i32>, <32 x i32> } asm sideeffect "; def $0", "=${v[0:31]},=${v[32:63]}"() |
| %vgpr.0 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 0 |
| %vgpr.1 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 1 |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| call void asm sideeffect "; use $0", "{v[0:31]},{v[32:63]}"(<32 x i32> %vgpr.0, <32 x i32> %vgpr.1) |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
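| ; No-return case: the AGPR input feeds the atomic data operand directly, with |
| ; no copy to a VGPR. |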
| define void @global_atomic_xchg_i32_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v[0:1], a0, off offset:40 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v[0:1], a0, off offset:40 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %unused = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| ret void |
| } |
| |
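| ; No-return case with an AV input. |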
| define void @global_atomic_xchg_i32_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap v[0:1], v2, off offset:40 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap v[0:1], v2, off offset:40 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data seq_cst |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; xchg i64 cases |
| ;--------------------------------------------------------------------- |
| |
| ; Input and result use AGPR |
| define void @global_atomic_xchg_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xchg_i64_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xchg_i64_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xchg_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as v |
| define void @global_atomic_xchg_i64_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as a |
| define void @global_atomic_xchg_i64_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xchg_i64_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xchg_i64_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
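| ; No-return case: the AGPR pair feeds the 64-bit atomic data operand directly. |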
| define void @global_atomic_xchg_i64_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], a[0:1], off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], a[0:1], off sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %unused = atomicrmw xchg ptr addrspace(1) %ptr, i64 %data seq_cst |
| ret void |
| } |
| |
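| ; No-return case with an AV input (i64). |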
| define void @global_atomic_xchg_i64_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xchg ptr addrspace(1) %ptr, i64 %data seq_cst |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; xor i32 cases with cmpxchg expansion |
| ;--------------------------------------------------------------------- |
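| ; These atomicrmw xor operations are expanded into a global_atomic_cmpswap |
| ; loop, so the checks also cover where the AGPR/AV copies of the input and |
| ; result are placed relative to the loop. |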
| |
| ; Input and result use AGPR |
| define void @global_atomic_xor_expansion_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB21_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB21_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB21_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB21_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xor_expansion_i32_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB22_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB22_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB22_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xor_expansion_i32_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB23_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB23_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB23_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xor_expansion_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB24_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB24_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB24_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB24_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, result used as v |
| define void @global_atomic_xor_expansion_i32_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB25_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB25_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB25_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB25_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, result used as a |
| define void @global_atomic_xor_expansion_i32_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB26_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB26_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB26_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xor_expansion_i32_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB27_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB27_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB27_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xor_expansion_i32_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB28_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB28_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB28_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v5, v3 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB28_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
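| ; Like the av_av case above, but the inline asm additionally occupies v[0:31] and v[32:63], |
| ; so the pointer and the loop input/result are carried through AGPR copies and scratch spills. |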
| define void @global_atomic_xor_expansion_i32_ret_av_av_no_agprs(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_ret_av_av_no_agprs: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword a32, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword a33, off, s[0:3], s32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a33, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a32, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a2, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a3, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a4, v4 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a5, v5 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a6, v6 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a7, v7 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a8, v8 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a9, v9 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a10, v10 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a11, v11 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a12, v12 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a13, v13 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a14, v14 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a15, v15 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a16, v16 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a17, v17 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a18, v18 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a19, v19 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a20, v20 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a21, v21 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a22, v22 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a23, v23 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a24, v24 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a25, v25 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a26, v26 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a27, v27 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a28, v28 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a29, v29 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a30, v30 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a31, v31 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a32 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a33 |
| ; GFX90A-NEXT: global_load_dword v1, v[4:5], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v1 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v0 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v[4:5], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB29_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a32, v1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a3 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a4 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a5 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a6 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a7 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v8, a8 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v9, a9 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v10, a10 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v11, a11 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v12, a12 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v13, a13 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v14, a14 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v15, a15 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v16, a16 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v17, a17 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v18, a18 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v19, a19 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v20, a20 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v21, a21 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v22, a22 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v23, a23 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v24, a24 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v25, a25 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v26, a26 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v27, a27 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v28, a28 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v29, a29 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v30, a30 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v31, a31 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a32 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_load_dword a33, off, s[0:3], s32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword a32, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_ret_av_av_no_agprs: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: scratch_store_dword off, v40, s32 offset:68 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v41, s32 offset:64 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v42, s32 offset:60 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v43, s32 offset:56 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v44, s32 offset:52 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v45, s32 offset:48 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v46, s32 offset:44 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v47, s32 offset:40 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v56, s32 offset:36 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v57, s32 offset:32 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v58, s32 offset:28 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v59, s32 offset:24 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v60, s32 offset:20 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v61, s32 offset:16 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v62, s32 offset:12 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, v63, s32 offset:8 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, a32, s32 offset:4 ; 4-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dword off, a33, s32 ; 4-byte Folded Spill |
| ; GFX950-NEXT: v_accvgpr_write_b32 a33, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a32, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a2, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a3, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a4, v4 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a5, v5 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a6, v6 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a7, v7 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a8, v8 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a9, v9 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a10, v10 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a11, v11 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a12, v12 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a13, v13 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a14, v14 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a15, v15 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a16, v16 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a17, v17 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a18, v18 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a19, v19 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a20, v20 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a21, v21 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a22, v22 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a23, v23 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a24, v24 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a25, v25 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a26, v26 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a27, v27 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a28, v28 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a29, v29 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a30, v30 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a31, v31 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a32 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a33 |
| ; GFX950-NEXT: global_load_dword v1, v[4:5], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB29_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v1 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v0 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v[4:5], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB29_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a32, v1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a3 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a4 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a5 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a6 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a7 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v8, a8 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v9, a9 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v10, a10 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v11, a11 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v12, a12 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v13, a13 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v14, a14 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v15, a15 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v16, a16 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v17, a17 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v18, a18 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v19, a19 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v20, a20 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v21, a21 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v22, a22 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v23, a23 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v24, a24 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v25, a25 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v26, a26 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v27, a27 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v28, a28 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v29, a29 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v30, a30 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v31, a31 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a32 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: scratch_load_dword a33, off, s32 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword a32, off, s32 offset:4 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v63, off, s32 offset:8 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v62, off, s32 offset:12 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v61, off, s32 offset:16 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v60, off, s32 offset:20 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v59, off, s32 offset:24 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v58, off, s32 offset:28 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v57, off, s32 offset:32 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v56, off, s32 offset:36 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v47, off, s32 offset:40 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v46, off, s32 offset:44 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v45, off, s32 offset:48 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v44, off, s32 offset:52 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v43, off, s32 offset:56 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v42, off, s32 offset:60 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v41, off, s32 offset:64 ; 4-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dword v40, off, s32 offset:68 ; 4-byte Folded Reload |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %vgpr.def = call { <32 x i32>, <32 x i32> } asm sideeffect "; def $0", "=${v[0:31]},=${v[32:63]}"() |
| %vgpr.0 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 0 |
| %vgpr.1 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 1 |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| call void asm sideeffect "; use $0", "{v[0:31]},{v[32:63]}"(<32 x i32> %vgpr.0, <32 x i32> %vgpr.1) |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
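| ; Input is AGPR, result is unused |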
| define void @global_atomic_xor_expansion_i32_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB30_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB30_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB30_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB30_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| ret void |
| } |
| |
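| ; Input is AV, result is unused |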
| define void @global_atomic_xor_expansion_i32_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i32_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB31_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB31_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i32_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB31_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v3, v4 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB31_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i32 %data seq_cst |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; xor i64 cases with cmpxchg expansion |
| ;--------------------------------------------------------------------- |
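| ; As with the i32 cases above, these atomicrmw xor operations are expanded into a load plus a |
| ; global_atomic_cmpswap_x2 retry loop; the checks track how AGPR/VGPR/AV operands are copied |
| ; around that loop. |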
| |
| ; Input and result use AGPR |
| define void @global_atomic_xor_expansion_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB32_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB32_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB32_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB32_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xor_expansion_i64_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB33_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB33_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: .LBB33_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB33_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xor_expansion_i64_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[6:7] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB34_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB34_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[6:7] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB34_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB34_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xor_expansion_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB35_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB35_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB35_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB35_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, result used as v |
| define void @global_atomic_xor_expansion_i64_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB36_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB36_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB36_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB36_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, result used as a |
| define void @global_atomic_xor_expansion_i64_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[6:7] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB37_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB37_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[6:7] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB37_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB37_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xor_expansion_i64_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB38_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB38_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: .LBB38_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB38_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xor_expansion_i64_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB39_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX90A-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB39_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB39_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_xor_b32_e32 v5, v7, v3 |
| ; GFX950-NEXT: v_xor_b32_e32 v4, v6, v2 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB39_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_expansion_i64_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB40_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB40_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB40_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB40_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| ret void |
| } |
| |
| define void @global_atomic_xor_expansion_i64_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_expansion_i64_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[6:7] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB41_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX90A-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX90A-NEXT: buffer_wbl2 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_invl2 |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB41_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_expansion_i64_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[6:7] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB41_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_xor_b32_e32 v3, v5, v7 |
| ; GFX950-NEXT: v_xor_b32_e32 v2, v4, v6 |
| ; GFX950-NEXT: buffer_wbl2 sc0 sc1 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off sc0 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc0 sc1 |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB41_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i64 %data seq_cst |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; xor i32 cases with instruction |
| ;--------------------------------------------------------------------- |
| |
| ; Input and result use AGPR |
| define void @global_atomic_xor_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xor_i32_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xor_i32_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xor_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as v |
| define void @global_atomic_xor_i32_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "v"(i32 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as a |
| define void @global_atomic_xor_i32_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xor_i32_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xor_i32_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
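| ; High register pressure case: the inline asm keeps all of v[0:31] and v[32:63] |
| ; live across the atomic, so the AV data and result have to be routed through |
| ; spare AGPRs and scratch spills. |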
| define void @global_atomic_xor_i32_ret_av_av_no_agprs(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_ret_av_av_no_agprs: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a3, v40 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a4, v41 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a5, v42 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a6, v43 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a7, v44 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a8, v45 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a9, v46 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a10, v47 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a11, v56 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a12, v57 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a13, v58 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a14, v59 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a15, v60 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a16, v61 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a17, v62 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a18, v63 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: s_nop 0 |
| ; GFX90A-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a19, v31 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_atomic_xor v0, v[0:1], v2, off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a31, v19 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a30, v20 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a29, v21 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a28, v22 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a27, v23 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a26, v24 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a25, v25 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a24, v26 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a23, v27 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a22, v28 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a21, v29 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a20, v30 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v19, a31 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v20, a30 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v21, a29 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v22, a28 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v23, a27 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v24, a26 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v25, a25 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v26, a24 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v27, a23 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v28, a22 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v29, a21 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v30, a20 ; Reload Reuse |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v31, a19 ; Reload Reuse |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:31] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v63, a18 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v62, a17 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v61, a16 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v60, a15 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v59, a14 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v58, a13 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v57, a12 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v56, a11 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v47, a10 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v46, a9 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v45, a8 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v44, a7 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v43, a6 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v42, a5 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v41, a4 ; Reload Reuse |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v40, a3 ; Reload Reuse |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_ret_av_av_no_agprs: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a3, v40 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a4, v41 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a5, v42 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a6, v43 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a7, v44 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a8, v45 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a9, v46 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a10, v47 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a11, v56 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a12, v57 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a13, v58 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a14, v59 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a15, v60 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a16, v61 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a17, v62 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a18, v63 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[0:3], s32 ; 16-byte Folded Spill |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[4:7], s32 offset:16 ; 16-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[8:11], s32 offset:32 ; 16-byte Folded Spill |
| ; GFX950-NEXT: scratch_store_dwordx4 off, v[12:15], s32 offset:48 ; 16-byte Folded Spill |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_accvgpr_write_b32 a19, v31 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a20, v30 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a21, v29 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a22, v28 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: scratch_store_dwordx3 off, v[16:18], s32 offset:64 ; 12-byte Folded Spill |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a2 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: global_atomic_xor v0, v[0:1], v2, off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a31, v19 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a27, v23 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a28, v22 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a29, v21 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a30, v20 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a23, v27 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a24, v26 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a25, v25 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a26, v24 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: scratch_load_dwordx4 v[0:3], off, s32 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[4:7], off, s32 offset:16 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[8:11], off, s32 offset:32 ; 16-byte Folded Reload |
| ; GFX950-NEXT: scratch_load_dwordx4 v[12:15], off, s32 offset:48 ; 16-byte Folded Reload |
| ; GFX950-NEXT: v_accvgpr_read_b32 v19, a31 ; Reload Reuse |
| ; GFX950-NEXT: scratch_load_dwordx3 v[16:18], off, s32 offset:64 ; 12-byte Folded Reload |
| ; GFX950-NEXT: v_accvgpr_read_b32 v23, a27 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v22, a28 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v21, a29 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v20, a30 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v27, a23 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v26, a24 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v25, a25 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v24, a26 ; Reload Reuse |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_read_b32 v31, a19 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v30, a20 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v29, a21 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v28, a22 ; Reload Reuse |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:31] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_accvgpr_read_b32 v63, a18 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v62, a17 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v61, a16 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v60, a15 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v59, a14 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v58, a13 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v57, a12 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v56, a11 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v47, a10 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v46, a9 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v45, a8 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v44, a7 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v43, a6 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v42, a5 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v41, a4 ; Reload Reuse |
| ; GFX950-NEXT: v_accvgpr_read_b32 v40, a3 ; Reload Reuse |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %vgpr.def = call { <32 x i32>, <32 x i32> } asm sideeffect "; def $0", "=${v[0:31]},=${v[32:63]}"() |
| %vgpr.0 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 0 |
| %vgpr.1 = extractvalue { <32 x i32>, <32 x i32> } %vgpr.def, 1 |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm sideeffect "; use $0", "{v[0:31]},{v[32:63]}"(<32 x i32> %vgpr.0, <32 x i32> %vgpr.1) |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i32_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v[0:1], a0, off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v[0:1], a0, off |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| ret void |
| } |
| |
| define void @global_atomic_xor_i32_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v[0:1], v2, off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor v[0:1], v2, off |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; xor i64 cases with instruction |
| ;--------------------------------------------------------------------- |
| |
| ; Input and result use AGPR |
| define void @global_atomic_xor_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AGPR, result used as VGPR. |
| define void @global_atomic_xor_i64_ret_a_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_a_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_a_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is VGPR, result used as AGPR |
| define void @global_atomic_xor_i64_ret_v_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_v_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_v_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, result also used as AV |
| define void @global_atomic_xor_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as v |
| define void @global_atomic_xor_i64_ret_av_v(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_av_v: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_av_v: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "v"(i64 %result) |
| ret void |
| } |
| |
| ; Input is AV, used as a |
| define void @global_atomic_xor_i64_ret_av_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_av_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_av_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| ; Input is a, result used as AV |
| define void @global_atomic_xor_i64_ret_a_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_a_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_a_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ; Input is v, result used as AV |
| define void @global_atomic_xor_i64_ret_v_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_ret_v_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_ret_v_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=v"() |
| %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i64_noret_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_noret_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], a[0:1], off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_noret_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], a[0:1], off |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| ret void |
| } |
| |
| define void @global_atomic_xor_i64_noret_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_noret_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v[2:3], off |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: buffer_wbinvl1_vol |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_noret_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: buffer_wbl2 sc1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v[2:3], off |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: buffer_inv sc1 |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %unused = atomicrmw xor ptr addrspace(1) %ptr, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomics i32, with a_a + av_av cases |
| ;--------------------------------------------------------------------- |
| |
| define void @global_atomic_add_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_add v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_add v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_sub v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_sub v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_sub v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_sub v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_and v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_and v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_and v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_and v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
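| ; nand has no hardware atomic instruction; it expands to a load plus cmpswap retry loop. |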
| define void @global_atomic_nand_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB69_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_and_b32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: v_not_b32_e32 v2, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB69_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB69_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_bitop3_b32 v2, v3, v4, v3 bitop3:0x3f |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB69_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB70_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v3, v5, v2 |
| ; GFX90A-NEXT: v_not_b32_e32 v4, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB70_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB70_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_bitop3_b32 v4, v5, v3, v5 bitop3:0x3f |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB70_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_or v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_or v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_or v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_or v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_smax v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_smax v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smax v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smax v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_smin v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_smin v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smin v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smin v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_umax v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_umax v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umax v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umax v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_umin v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_umin v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umin v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umin v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_inc v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_inc v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_inc v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_inc v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_dec v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_dec v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_dec v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_dec v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
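| ; usub_cond expands to a cmpswap loop (sub, unsigned compare, cndmask). |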
| define void @global_atomic_usub_cond_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB85_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_u32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: v_cmp_ge_u32_e32 vcc, v3, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB85_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB85_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_u32_e32 v2, v3, v4 |
| ; GFX950-NEXT: v_cmp_ge_u32_e32 vcc, v3, v4 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB85_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_cond_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB86_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_sub_u32_e32 v3, v5, v2 |
| ; GFX90A-NEXT: v_cmp_ge_u32_e32 vcc, v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB86_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB86_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_sub_u32_e32 v3, v5, v2 |
| ; GFX950-NEXT: v_cmp_ge_u32_e32 vcc, v5, v2 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB86_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
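| ; usub_sat expands to a cmpswap loop using v_sub_u32 with clamp. |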
| define void @global_atomic_usub_sat_i32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB87_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_u32_e64 v2, v3, v4 clamp |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB87_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB87_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_u32_e64 v2, v3, v4 clamp |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB87_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_sat_i32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB88_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_sub_u32_e64 v4, v5, v3 clamp |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB88_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB88_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_sub_u32_e64 v4, v5, v3 clamp |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB88_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomic operations, i64 cases with a_a and av_av variants |
| ;--------------------------------------------------------------------- |
| |
| define void @global_atomic_add_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
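| ; 64-bit nand also expands to a cmpswap loop, using global_atomic_cmpswap_x2. |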
| define void @global_atomic_nand_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB95_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_and_b32_e32 v2, v5, v7 |
| ; GFX90A-NEXT: v_and_b32_e32 v8, v4, v6 |
| ; GFX90A-NEXT: v_not_b32_e32 v3, v2 |
| ; GFX90A-NEXT: v_not_b32_e32 v2, v8 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB95_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB95_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, v5, v7 |
| ; GFX950-NEXT: v_and_b32_e32 v8, v4, v6 |
| ; GFX950-NEXT: v_not_b32_e32 v3, v2 |
| ; GFX950-NEXT: v_not_b32_e32 v2, v8 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB95_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB96_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_and_b32_e32 v4, v7, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v8, v6, v2 |
| ; GFX90A-NEXT: v_not_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_not_b32_e32 v4, v8 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB96_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB96_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_and_b32_e32 v4, v7, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v8, v6, v2 |
| ; GFX950-NEXT: v_not_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_not_b32_e32 v4, v8 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB96_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
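| ; or i64, input and result in AV (AGPR or VGPR) registers. |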
| define void @global_atomic_or_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
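| ; signed max i64, input and result in AGPRs. |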
| define void @global_atomic_max_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_smax_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_smax_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
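| ; signed max i64, input and result in AV registers. |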
| define void @global_atomic_max_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smax_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smax_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
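| ; signed min i64, input and result in AGPRs. |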
| define void @global_atomic_min_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_smin_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_smin_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
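| ; signed min i64, input and result in AV registers. |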
| define void @global_atomic_min_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smin_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smin_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
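| ; unsigned max i64, input and result in AGPRs. |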
| define void @global_atomic_umax_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_umax_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_umax_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
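| ; unsigned max i64, input and result in AV registers. |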
| define void @global_atomic_umax_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umax_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umax_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
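| ; unsigned min i64, input and result in AGPRs. |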
| define void @global_atomic_umin_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_umin_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_umin_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
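| ; unsigned min i64, input and result in AV registers. |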
| define void @global_atomic_umin_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umin_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umin_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
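| ; uinc_wrap i64, input and result in AGPRs. |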
| define void @global_atomic_uinc_wrap_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
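| ; uinc_wrap i64, input and result in AV registers. |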
| define void @global_atomic_uinc_wrap_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
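| ; udec_wrap i64, input and result in AGPRs. |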
| define void @global_atomic_udec_wrap_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
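| ; udec_wrap i64, input and result in AV registers. |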
| define void @global_atomic_udec_wrap_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
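| ; usub_cond i64 expands to a cmpxchg loop; input and result in AGPRs. |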
| define void @global_atomic_usub_cond_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB111_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc |
| ; GFX90A-NEXT: v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB111_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB111_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc |
| ; GFX950-NEXT: v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB111_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
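| ; usub_cond i64 expands to a cmpxchg loop; input and result in AV registers. |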
| define void @global_atomic_usub_cond_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB112_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc |
| ; GFX90A-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[2:3] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB112_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB112_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc |
| ; GFX950-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[2:3] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB112_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
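| ; usub_sat i64 expands to a cmpxchg loop; input and result in AGPRs. |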
| define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB113_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc |
| ; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB113_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB113_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc |
| ; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB113_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
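| ; usub_sat i64 expands to a cmpxchg loop; input and result in AV registers. |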
| define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB114_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc |
| ; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB114_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB114_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc |
| ; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB114_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomics, f32 cases, with a_a and av_av register variants |
| ;--------------------------------------------------------------------- |
| |
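| ; fadd f32, input and result in AGPRs. |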
| define void @global_atomic_fadd_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
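| ; fadd f32, input and result in AV registers. |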
| define void @global_atomic_fadd_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
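| ; fsub f32 expands to a cmpxchg loop; input and result in AGPRs. |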
| define void @global_atomic_fsub_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB117_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_f32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB117_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB117_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_f32_e32 v2, v3, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB117_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
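| ; fsub f32 expands to a cmpxchg loop; input and result in AV registers. |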
| define void @global_atomic_fsub_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB118_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_sub_f32_e32 v4, v5, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB118_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB118_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_sub_f32_e32 v4, v5, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB118_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
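| ; fmax f32 expands to a cmpxchg loop; input and result in AGPRs. |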
| define void @global_atomic_fmax_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2 |
| ; GFX90A-NEXT: .LBB119_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB119_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v2, v2 |
| ; GFX950-NEXT: .LBB119_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f32_e32 v2, v3, v3 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v2, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB119_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
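| ; fmax f32 expands to a cmpxchg loop; input and result in AV registers. |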
| define void @global_atomic_fmax_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v3, v3 |
| ; GFX90A-NEXT: .LBB120_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v5, v5 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v2, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB120_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_max_f32_e32 v3, v3, v3 |
| ; GFX950-NEXT: .LBB120_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v5, v5 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v2, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB120_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
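| ; fmin f32 expands to a cmpxchg loop; input and result in AGPRs. |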
| define void @global_atomic_fmin_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v2, v2 |
| ; GFX90A-NEXT: .LBB121_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v3, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB121_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v2, v2 |
| ; GFX950-NEXT: .LBB121_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f32_e32 v2, v3, v3 |
| ; GFX950-NEXT: v_min_f32_e32 v2, v2, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB121_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
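| ; fmin f32 expands to a cmpxchg loop; input and result in AV registers. |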
| define void @global_atomic_fmin_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v3, v3 |
| ; GFX90A-NEXT: .LBB122_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v5, v5 |
| ; GFX90A-NEXT: v_min_f32_e32 v4, v2, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB122_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_max_f32_e32 v3, v3, v3 |
| ; GFX950-NEXT: .LBB122_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v5, v5 |
| ; GFX950-NEXT: v_min_f32_e32 v4, v2, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB122_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
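| ; fmaximum f32 expands to a cmpxchg loop; input and result in AGPRs. |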
| define void @global_atomic_fmaximum_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7fc00000 |
| ; GFX90A-NEXT: .LBB123_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v3, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB123_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB123_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_maximum3_f32 v2, v3, v4, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB123_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
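| ; f32 fmaximum, input and result use the "^VA" constraint (VGPR or AGPR). |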
| define void @global_atomic_fmaximum_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7fc00000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB124_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v5, v2 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB124_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB124_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_maximum3_f32 v4, v5, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB124_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
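| ; f32 fminimum, input and result in AGPRs. |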
| define void @global_atomic_fminimum_f32_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f32_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7fc00000 |
| ; GFX90A-NEXT: .LBB125_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_min_f32_e32 v2, v3, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v3, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB125_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f32_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB125_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_minimum3_f32 v2, v3, v4, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB125_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
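| ; f32 fminimum, input and result use the "^VA" constraint. |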
| define void @global_atomic_fminimum_f32_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f32_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7fc00000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB126_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_min_f32_e32 v4, v5, v2 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB126_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f32_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB126_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_minimum3_f32 v4, v5, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB126_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other f64 atomics, with a_a and av_av cases |
| ;--------------------------------------------------------------------- |
| |
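| ; f64 fadd, input and result in AGPRs. |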
| define void @global_atomic_fadd_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
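| ; f64 fadd, input and result use the "^VA" constraint. |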
| define void @global_atomic_fadd_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
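| ; f64 fsub, input and result in AGPRs. |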
| define void @global_atomic_fsub_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB129_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -v[6:7] |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB129_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: .LBB129_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_add_f64 v[2:3], v[4:5], -v[6:7] |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB129_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
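| ; f64 fsub, input and result use the "^VA" constraint. |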
| define void @global_atomic_fsub_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB130_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_add_f64 v[4:5], v[6:7], -v[2:3] |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB130_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB130_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5] |
| ; GFX950-NEXT: v_add_f64 v[4:5], v[6:7], -v[2:3] |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB130_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
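| ; f64 fmax, input and result in AGPRs. |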
| define void @global_atomic_fmax_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
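| ; f64 fmax, input and result use the "^VA" constraint. |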
| define void @global_atomic_fmax_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
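| ; f64 fmin, input and result in AGPRs. |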
| define void @global_atomic_fmin_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
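| ; f64 fmin, input and result use the "^VA" constraint. |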
| define void @global_atomic_fmin_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
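| ; f64 fmaximum, input and result in AGPRs. |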
| define void @global_atomic_fmaximum_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v8, 0x7ff80000 |
| ; GFX90A-NEXT: .LBB135_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB135_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: v_mov_b32_e32 v8, 0x7ff80000 |
| ; GFX950-NEXT: .LBB135_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB135_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
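| ; f64 fmaximum, input and result use the "^VA" constraint. |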
| define void @global_atomic_fmaximum_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0x7ff80000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB136_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_max_f64 v[4:5], v[10:11], v[2:3] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[2:3] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v9, v5, v6, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v8, v4, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[8:11], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[10:11] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB136_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0x7ff80000 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB136_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[10:11], v[4:5] |
| ; GFX950-NEXT: v_max_f64 v[4:5], v[10:11], v[2:3] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[2:3] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v9, v5, v6, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v8, v4, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[8:11], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[10:11] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB136_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
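| ; f64 fminimum, input and result in AGPRs. |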
| define void @global_atomic_fminimum_f64_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f64_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v8, 0x7ff80000 |
| ; GFX90A-NEXT: .LBB137_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_min_f64 v[2:3], v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB137_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f64_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v7, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v6, a0 |
| ; GFX950-NEXT: v_mov_b32_e32 v8, 0x7ff80000 |
| ; GFX950-NEXT: .LBB137_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_min_f64 v[2:3], v[4:5], v[6:7] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b64_e32 v[4:5], v[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB137_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
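| ; f64 fminimum, input and result use the "^VA" constraint. |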
| define void @global_atomic_fminimum_f64_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f64_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0x7ff80000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB138_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[4:5], v[4:5] op_sel:[0,1] |
| ; GFX90A-NEXT: v_min_f64 v[4:5], v[10:11], v[2:3] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[2:3] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v9, v5, v6, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v8, v4, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[8:11], off offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[10:11] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB138_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[4:5] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f64_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0x7ff80000 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB138_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[10:11], v[4:5] |
| ; GFX950-NEXT: v_min_f64 v[4:5], v[10:11], v[2:3] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[2:3] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v9, v5, v6, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v8, v4, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[8:11], off offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[10:11] |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB138_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[4:5] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other v2f16 atomics, with a_a and av_av cases |
| ;--------------------------------------------------------------------- |
| |
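| ; v2f16 fadd, input and result in AGPRs. |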
| define void @global_atomic_fadd_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: global_atomic_pk_add_f16 v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_pk_add_f16 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
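| ; v2f16 fadd, input and result use the "^VA" constraint. |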
| define void @global_atomic_fadd_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_pk_add_f16 v0, v[0:1], v2, off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_pk_add_f16 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
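| ; v2f16 fsub, input and result in AGPRs. |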
| define void @global_atomic_fsub_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB141_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_add_f16 v2, v3, v4 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB141_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB141_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_add_f16 v2, v3, v4 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB141_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
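| ; v2f16 fsub, input and result use the "^VA" constraint. |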
| define void @global_atomic_fsub_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB142_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v3 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB142_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB142_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_pk_add_f16 v4, v5, v3 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB142_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
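| ; v2f16 fmax, input and result in AGPRs. |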
| define void @global_atomic_fmax_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v4, v2, v2 |
| ; GFX90A-NEXT: .LBB143_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3 |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v2, v4 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB143_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_pk_max_f16 v4, v2, v2 |
| ; GFX950-NEXT: .LBB143_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_max_f16 v2, v3, v3 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_max_f16 v2, v2, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB143_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
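| ; v2f16 fmax, input and result use the "^VA" constraint. |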
| define void @global_atomic_fmax_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v3, v3, v3 |
| ; GFX90A-NEXT: .LBB144_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v5, v5 |
| ; GFX90A-NEXT: v_pk_max_f16 v4, v2, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB144_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_pk_max_f16 v3, v3, v3 |
| ; GFX950-NEXT: .LBB144_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_pk_max_f16 v2, v5, v5 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_max_f16 v4, v2, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB144_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
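| ; v2f16 fmin, input and result in AGPRs. |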
| define void @global_atomic_fmin_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v4, v2, v2 |
| ; GFX90A-NEXT: .LBB145_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3 |
| ; GFX90A-NEXT: v_pk_min_f16 v2, v2, v4 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB145_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_pk_max_f16 v4, v2, v2 |
| ; GFX950-NEXT: .LBB145_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_max_f16 v2, v3, v3 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_min_f16 v2, v2, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB145_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
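| ; v2f16 fmin, input and result use the "^VA" constraint. |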
| define void @global_atomic_fmin_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v3, v3, v3 |
| ; GFX90A-NEXT: .LBB146_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v5, v5 |
| ; GFX90A-NEXT: v_pk_min_f16 v4, v2, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB146_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_pk_max_f16 v3, v3, v3 |
| ; GFX950-NEXT: .LBB146_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_pk_max_f16 v2, v5, v5 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_min_f16 v4, v2, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB146_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
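| ; Input and result use AGPR. |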
| define void @global_atomic_fmaximum_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: .LBB147_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v3, v4 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v3, v4 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v3, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v5, v2, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v2, v5, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v2, v2, v6, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB147_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB147_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_maximum3_f16 v2, v3, v4, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB147_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
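| ; Input and result use AV (AGPR or VGPR) operands. |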
| define void @global_atomic_fmaximum_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB148_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_pk_max_f16 v4, v5, v2 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v3, v4, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v4, v3, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v6, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB148_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB148_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_pk_maximum3_f16 v4, v5, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB148_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
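| ; Input and result use AGPR. |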
| define void @global_atomic_fminimum_v2f16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2f16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: .LBB149_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_min_f16 v2, v3, v4 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v3, v4 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v3, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v5, v2, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v2, v5, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v2, v2, v6, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB149_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2f16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB149_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_minimum3_f16 v2, v3, v4, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB149_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
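| ; Input and result use AV (AGPR or VGPR) operands. |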
| define void @global_atomic_fminimum_v2f16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2f16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB150_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_pk_min_f16 v4, v5, v2 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v3, v4, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v4, v3, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v6, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB150_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2f16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v2, v[0:1], off offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB150_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX950-NEXT: v_pk_minimum3_f16 v4, v5, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB150_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomics on <2 x bfloat>, with a_a and av_av cases |
| ;--------------------------------------------------------------------- |
| |
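| ; Input and result use AGPR. |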
| define void @global_atomic_fadd_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB151_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_add_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: v_add_f32_e32 v6, v6, v5 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v6, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB151_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: global_atomic_pk_add_bf16 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
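| ; Input and result use AV (AGPR or VGPR) operands. |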
| define void @global_atomic_fadd_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB152_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_add_f32_e32 v6, v6, v3 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB152_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_pk_add_bf16 v0, v[0:1], v2, off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
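| ; Input and result use AGPR. |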
| define void @global_atomic_fsub_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB153_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_sub_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: v_sub_f32_e32 v6, v6, v5 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v6, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB153_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v2 |
| ; GFX950-NEXT: .LBB153_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v3 |
| ; GFX950-NEXT: v_sub_f32_e32 v2, v2, v4 |
| ; GFX950-NEXT: v_sub_f32_e32 v6, v6, v5 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v6, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB153_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
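| ; Input and result use AV (AGPR or VGPR) operands. |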
| define void @global_atomic_fsub_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB154_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_sub_f32_e32 v6, v6, v3 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB154_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 |
| ; GFX950-NEXT: .LBB154_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v5 |
| ; GFX950-NEXT: v_sub_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_sub_f32_e32 v6, v6, v3 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v6, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB154_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
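| ; Input and result use AGPR. |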
| define void @global_atomic_fmax_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB155_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: v_max_f32_e32 v6, v6, v5 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v6, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB155_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v2 |
| ; GFX950-NEXT: .LBB155_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v3 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v2, v4 |
| ; GFX950-NEXT: v_max_f32_e32 v6, v6, v5 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v6, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB155_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
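| ; Input and result use AV (AGPR or VGPR) operands. |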
| define void @global_atomic_fmax_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB156_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_max_f32_e32 v6, v6, v3 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB156_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 |
| ; GFX950-NEXT: .LBB156_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v5 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_max_f32_e32 v6, v6, v3 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v6, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB156_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
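| ; Input and result use AGPR. |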
| define void @global_atomic_fmin_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB157_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v2, v2, v4 |
| ; GFX90A-NEXT: v_min_f32_e32 v6, v6, v5 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v6, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB157_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v2 |
| ; GFX950-NEXT: .LBB157_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v3 |
| ; GFX950-NEXT: v_min_f32_e32 v2, v2, v4 |
| ; GFX950-NEXT: v_min_f32_e32 v6, v6, v5 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v6, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB157_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
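| ; Input and result use AV (AGPR or VGPR) operands. |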
| define void @global_atomic_fmin_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB158_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_min_f32_e32 v6, v6, v3 |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB158_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 |
| ; GFX950-NEXT: .LBB158_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v5 |
| ; GFX950-NEXT: v_min_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_min_f32_e32 v6, v6, v3 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v6, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB158_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
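| ; Input and result use AGPR. |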
| define void @global_atomic_fmaximum_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB159_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v8, v2, v4 |
| ; GFX90A-NEXT: v_max_f32_e32 v9, v7, v6 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v7, v6 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v2, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v5, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v5, v9, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v8, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v10, v7, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v10, v10, v7, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v8, v9, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v7, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB159_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v2 |
| ; GFX950-NEXT: .LBB159_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v3 |
| ; GFX950-NEXT: v_maximum3_f32 v2, v2, v4, v4 |
| ; GFX950-NEXT: v_maximum3_f32 v6, v6, v5, v5 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v6, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB159_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
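| ; Input and result use AV (AGPR or VGPR) operands. |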
| define void @global_atomic_fmaximum_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v5, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v4 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB160_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v7, v5 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v5, 16, v7 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 |
| ; GFX90A-NEXT: v_max_f32_e32 v8, v5, v2 |
| ; GFX90A-NEXT: v_max_f32_e32 v9, v6, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v3, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v3, v9, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v10, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_add3_u32 v10, v10, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v5, v5 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v8, v9, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc |
| ; GFX90A-NEXT: v_perm_b32 v6, v6, v5, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v5, v[0:1], v[6:7], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB160_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v5 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 |
| ; GFX950-NEXT: .LBB160_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v5 |
| ; GFX950-NEXT: v_maximum3_f32 v4, v4, v2, v2 |
| ; GFX950-NEXT: v_maximum3_f32 v6, v6, v3, v3 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v6, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB160_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
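| ; Input and result use AGPR. |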
| define void @global_atomic_fminimum_v2bf16_ret_a_a(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2bf16_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v2 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB161_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v3 |
| ; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v8, v2, v4 |
| ; GFX90A-NEXT: v_min_f32_e32 v9, v7, v6 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v7, v6 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v2, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v5, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v5, v9, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v8, v2, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v10, v7, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v2 |
| ; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v2, s8 |
| ; GFX90A-NEXT: v_add3_u32 v10, v10, v7, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v2, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v2, v8, v9, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc |
| ; GFX90A-NEXT: v_perm_b32 v2, v7, v2, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB161_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2bf16_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v3, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v2, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v2 |
| ; GFX950-NEXT: .LBB161_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v3 |
| ; GFX950-NEXT: v_minimum3_f32 v2, v2, v4, v4 |
| ; GFX950-NEXT: v_minimum3_f32 v6, v6, v5, v5 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v6, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v[0:1], v[2:3], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v2 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB161_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
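| ; Input and result use AV (AGPR or VGPR) operands. |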
| define void @global_atomic_fminimum_v2bf16_ret_av_av(ptr addrspace(1) %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2bf16_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_load_dword v5, v[0:1], off offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v4 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB162_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v7, v5 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v5, 16, v7 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 |
| ; GFX90A-NEXT: v_min_f32_e32 v8, v5, v2 |
| ; GFX90A-NEXT: v_min_f32_e32 v9, v6, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v4 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v5, v2 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v3, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v3, v9, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v10, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_add3_u32 v10, v10, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v5, v5 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v8, v9, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc |
| ; GFX90A-NEXT: v_perm_b32 v6, v6, v5, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v5, v[0:1], v[6:7], off offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB162_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v5 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2bf16_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: global_load_dword v4, v[0:1], off offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[0:1], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 |
| ; GFX950-NEXT: .LBB162_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v5 |
| ; GFX950-NEXT: v_minimum3_f32 v4, v4, v2, v2 |
| ; GFX950-NEXT: v_minimum3_f32 v6, v6, v3, v3 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v6, v4 |
| ; GFX950-NEXT: global_atomic_cmpswap v4, v[0:1], v[4:5], off offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX950-NEXT: s_or_b64 s[0:1], vcc, s[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB162_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[0:1] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v4 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomics i32, a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
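| ; The _a_a suffix means both the asm-defined atomic input and the used result |
| ; go through the AGPR-only inline asm constraint "a"; the _av_av suffix means |
| ; both use the "^VA" constraint, which allows either a VGPR or an AGPR. |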
| |
| define void @global_atomic_xchg_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_swap v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_swap v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xchg_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_swap v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_swap v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_add v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_add v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_sub v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_sub v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_sub v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_sub v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_and v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_and v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_and v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_and v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB171_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_and_b32_e32 v0, v1, v3 |
| ; GFX90A-NEXT: v_not_b32_e32 v0, v0 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB171_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB171_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_bitop3_b32 v0, v1, v3, v1 bitop3:0x3f |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB171_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v2, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB172_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: v_and_b32_e32 v2, v3, v1 |
| ; GFX90A-NEXT: v_not_b32_e32 v2, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v0, v[2:3], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB172_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB172_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_bitop3_b32 v4, v5, v2, v5 bitop3:0x3f |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB172_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_or v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_or v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_or v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_or v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_xor v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_xor v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_xor v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_smax v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_smax v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smax v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smax v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_smin v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_smin v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smin v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smin v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_umax v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_umax v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umax v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umax v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_umin v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_umin v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umin v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umin v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_inc v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_inc v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_inc v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_inc v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_dec v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_dec v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_dec v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_dec v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_cond_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB189_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_u32_e32 v0, v1, v3 |
| ; GFX90A-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB189_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB189_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_u32_e32 v0, v1, v3 |
| ; GFX950-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB189_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_cond_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v2, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB190_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX90A-NEXT: v_sub_u32_e32 v2, v3, v1 |
| ; GFX90A-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v2, v0, v[2:3], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB190_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v2, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB190_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v3, v2 |
| ; GFX950-NEXT: v_sub_u32_e32 v2, v3, v1 |
| ; GFX950-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap v2, v0, v[2:3], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB190_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_sat_i32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB191_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_u32_e64 v0, v1, v3 clamp |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB191_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB191_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_u32_e64 v0, v1, v3 clamp |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB191_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=a"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i32 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_sat_i32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB192_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_sub_u32_e64 v4, v5, v2 clamp |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB192_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB192_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_sub_u32_e64 v4, v5, v2 clamp |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB192_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i32 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i32 %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomics i64, with a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
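| ; Note on naming (restating the constraints used below): the *_a_a functions |
| ; tie both the defined data and the consumed result to AGPRs via the "a" |
| ; inline-asm constraint, the *_av_av functions use the unified "^VA" |
| ; (AGPR-or-VGPR) constraint, and the _saddr variants take the pointer inreg |
| ; so the base address is carried in an SGPR pair. |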
| |
| define void @global_atomic_xchg_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xchg_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xchg_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xchg_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xchg ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_add_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_add_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_add_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw add ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_sub_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_sub_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_sub_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw sub ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_and_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_and_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_and_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw and ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB201_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_and_b32_e32 v0, v3, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v7, v2, v4 |
| ; GFX90A-NEXT: v_not_b32_e32 v1, v0 |
| ; GFX90A-NEXT: v_not_b32_e32 v0, v7 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB201_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB201_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, v3, v5 |
| ; GFX950-NEXT: v_and_b32_e32 v7, v2, v4 |
| ; GFX950-NEXT: v_not_b32_e32 v1, v0 |
| ; GFX950-NEXT: v_not_b32_e32 v0, v7 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB201_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_nand_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_nand_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB202_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_and_b32_e32 v2, v9, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v3, v8, v0 |
| ; GFX90A-NEXT: v_not_b32_e32 v7, v2 |
| ; GFX90A-NEXT: v_not_b32_e32 v6, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB202_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_nand_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB202_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_and_b32_e32 v2, v9, v1 |
| ; GFX950-NEXT: v_and_b32_e32 v3, v8, v0 |
| ; GFX950-NEXT: v_not_b32_e32 v7, v2 |
| ; GFX950-NEXT: v_not_b32_e32 v6, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB202_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw nand ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_or_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_or_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_or_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_or_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_or_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_or_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_or_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw or ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw xor ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_xor_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_xor_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_xor_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_xor_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_xor_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw xor ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_max_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_max_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_max_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw max ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_min_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_min_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_min_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw min ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umax_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umax_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umax_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw umax ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_umin_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_umin_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_umin_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw umin ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_uinc_wrap_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_uinc_wrap_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_uinc_wrap_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw uinc_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_udec_wrap_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_udec_wrap_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_udec_wrap_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw udec_wrap ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_cond_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB219_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc |
| ; GFX90A-NEXT: v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB219_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB219_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc |
| ; GFX950-NEXT: v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB219_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_cond_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_cond_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB220_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc |
| ; GFX90A-NEXT: v_cmp_ge_u64_e32 vcc, v[8:9], v[0:1] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v9, v3, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v8, v2, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB220_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_cond_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB220_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc |
| ; GFX950-NEXT: v_cmp_ge_u64_e32 vcc, v[8:9], v[0:1] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v7, v9, v3, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e32 v6, v8, v2, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB220_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_cond ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB221_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc |
| ; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB221_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB221_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc |
| ; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB221_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=a"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(i64 %result) |
| ret void |
| } |
| |
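| ; usub_sat i64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |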
| define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_usub_sat_i64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB222_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0 |
| ; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc |
| ; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB222_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_usub_sat_i64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB222_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0 |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc |
| ; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB222_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x i64], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call i64 asm "; def $0", "=^VA"() |
| %result = atomicrmw usub_sat ptr addrspace(1) %gep.0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(i64 %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other f32 atomics, a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
| |
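| ; fadd f32 saddr: input and result use AGPR |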
| define void @global_atomic_fadd_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_add_f32 v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_add_f32 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
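| ; fadd f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |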
| define void @global_atomic_fadd_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_f32 v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_f32 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
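| ; fsub f32 saddr: input and result use AGPR |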
| define void @global_atomic_fsub_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB225_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_sub_f32_e32 v0, v1, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB225_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB225_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_sub_f32_e32 v0, v1, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB225_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
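| ; fsub f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |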
| define void @global_atomic_fsub_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB226_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_sub_f32_e32 v4, v5, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB226_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB226_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_sub_f32_e32 v4, v5, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB226_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
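| ; fmax f32 saddr: input and result use AGPR |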
| define void @global_atomic_fmax_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v0, v0 |
| ; GFX90A-NEXT: .LBB227_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB227_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_max_f32_e32 v3, v0, v0 |
| ; GFX950-NEXT: .LBB227_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f32_e32 v0, v1, v1 |
| ; GFX950-NEXT: v_max_f32_e32 v0, v0, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB227_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
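| ; fmax f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |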
| define void @global_atomic_fmax_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2 |
| ; GFX90A-NEXT: .LBB228_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v1, v5, v5 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v1, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB228_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v2, v2 |
| ; GFX950-NEXT: .LBB228_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_max_f32_e32 v1, v5, v5 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v1, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB228_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
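| ; fmin f32 saddr: input and result use AGPR |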
| define void @global_atomic_fmin_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v0, v0 |
| ; GFX90A-NEXT: .LBB229_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 |
| ; GFX90A-NEXT: v_min_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB229_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_max_f32_e32 v3, v0, v0 |
| ; GFX950-NEXT: .LBB229_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f32_e32 v0, v1, v1 |
| ; GFX950-NEXT: v_min_f32_e32 v0, v0, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB229_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
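| ; fmin f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |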
| define void @global_atomic_fmin_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_max_f32_e32 v2, v2, v2 |
| ; GFX90A-NEXT: .LBB230_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v1, v5, v5 |
| ; GFX90A-NEXT: v_min_f32_e32 v4, v1, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB230_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_max_f32_e32 v2, v2, v2 |
| ; GFX950-NEXT: .LBB230_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_max_f32_e32 v1, v5, v5 |
| ; GFX950-NEXT: v_min_f32_e32 v4, v1, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB230_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
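| ; fmaximum f32 saddr: input and result use AGPR |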
| define void @global_atomic_fmaximum_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7fc00000 |
| ; GFX90A-NEXT: .LBB231_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v1, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB231_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB231_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_maximum3_f32 v0, v1, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB231_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
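| ; fmaximum f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |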
| define void @global_atomic_fmaximum_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7fc00000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB232_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v5, v1 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v5, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v2, v3, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB232_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB232_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_maximum3_f32 v4, v5, v2, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB232_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
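| ; fminimum f32 saddr: input and result use AGPR |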
| define void @global_atomic_fminimum_f32_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f32_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7fc00000 |
| ; GFX90A-NEXT: .LBB233_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_min_f32_e32 v0, v1, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v1, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB233_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f32_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB233_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_minimum3_f32 v0, v1, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB233_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "a"(float %result) |
| ret void |
| } |
| |
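| ; fminimum f32 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |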
| define void @global_atomic_fminimum_f32_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f32_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7fc00000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB234_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v3, v5, v1 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v5, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v2, v3, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB234_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f32_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB234_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_minimum3_f32 v4, v5, v2, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB234_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x float], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call float asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, float %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 |
| call void asm "; use $0", "^VA"(float %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other f64 atomics, a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
| |
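| ; fadd f64 saddr: input and result use AGPR |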
| define void @global_atomic_fadd_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_add_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
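| ; fadd f64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |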
| define void @global_atomic_fadd_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_add_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
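| ; fsub f64 saddr: input and result use AGPR |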
| define void @global_atomic_fsub_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB237_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5] |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB237_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: .LBB237_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5] |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB237_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
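| ; fsub f64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |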
| define void @global_atomic_fsub_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB238_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], -v[0:1] |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB238_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB238_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_add_f64 v[6:7], v[8:9], -v[0:1] |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB238_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
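| ; fmax f64 saddr: input and result use AGPR |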
| define void @global_atomic_fmax_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_max_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
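| ; fmax f64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |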
| define void @global_atomic_fmax_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_max_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
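| ; fmin f64 saddr: input and result use AGPR |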
| define void @global_atomic_fmin_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a1 |
| ; GFX950-NEXT: global_atomic_min_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
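| ; fmin f64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |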
| define void @global_atomic_fmin_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v2, v[0:1], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_min_f64 v[0:1], v2, v[0:1], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
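| ; fmaximum f64 saddr: input and result use AGPR |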
| define void @global_atomic_fmaximum_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000 |
| ; GFX90A-NEXT: .LBB243_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB243_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: v_mov_b32_e32 v7, 0x7ff80000 |
| ; GFX950-NEXT: .LBB243_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB243_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
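| ; fmaximum f64 saddr: input and result may be AGPR or VGPR ("^VA" constraint) |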
| define void @global_atomic_fmaximum_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7ff80000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB244_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_max_f64 v[2:3], v[8:9], v[0:1] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[0:1] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v3, v5, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB244_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_mov_b32_e32 v5, 0x7ff80000 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB244_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_max_f64 v[2:3], v[8:9], v[0:1] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[0:1] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v7, v3, v5, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB244_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
| define void @global_atomic_fminimum_f64_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f64_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v6, s[16:17] offset:80 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000 |
| ; GFX90A-NEXT: .LBB245_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB245_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f64_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v6, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v6, s[0:1] offset:80 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v5, a1 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v4, a0 |
| ; GFX950-NEXT: v_mov_b32_e32 v7, 0x7ff80000 |
| ; GFX950-NEXT: .LBB245_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[4:5] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a1, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b64_e32 v[2:3], v[0:1] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB245_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(double %result) |
| ret void |
| } |
| |
| define void @global_atomic_fminimum_f64_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_f64_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX90A-NEXT: global_load_dwordx2 v[2:3], v4, s[16:17] offset:80 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, 0x7ff80000 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v[0:1] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB246_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_min_f64 v[2:3], v[8:9], v[0:1] |
| ; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[0:1] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v7, v3, v5, vcc |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB246_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v[2:3] |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_f64_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v4, 0 |
| ; GFX950-NEXT: global_load_dwordx2 v[2:3], v4, s[0:1] offset:80 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_mov_b32_e32 v5, 0x7ff80000 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v[0:1] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB246_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[2:3] |
| ; GFX950-NEXT: v_min_f64 v[2:3], v[8:9], v[0:1] |
| ; GFX950-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[0:1] |
| ; GFX950-NEXT: s_nop 1 |
| ; GFX950-NEXT: v_cndmask_b32_e32 v7, v3, v5, vcc |
| ; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc |
| ; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9] |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB246_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v[2:3] |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x double], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call double asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, double %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(double %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomic operations on v2f16, a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
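| ; Note on the register-constraint convention below (inferred from this file's |
| ; inline asm, not an authoritative statement of the constraint semantics): |
| ;   "=a" / "a"     def/use pinned to an AGPR; the checks copy through |
| ;                  v_accvgpr_read_b32 / v_accvgpr_write_b32 around the atomic. |
| ;   "=^VA" / "^VA" def/use in the combined AGPR+VGPR class; in these checks |
| ;                  the allocator settles on plain VGPRs, so no accvgpr copies. |
| ; Minimal sketch of the two def forms, mirroring the functions below: |
| ;   %a.val  = call <2 x half> asm "; def $0", "=a"() |
| ;   %av.val = call <2 x half> asm "; def $0", "=^VA"() |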
| |
| define void @global_atomic_fadd_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX90A-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fadd_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fsub_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: .LBB249_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_add_f16 v0, v1, v3 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB249_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB249_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_add_f16 v0, v1, v3 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB249_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fsub_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB250_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB250_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB250_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_pk_add_f16 v4, v5, v2 neg_lo:[0,1] neg_hi:[0,1] |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB250_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmax_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v3, v0, v0 |
| ; GFX90A-NEXT: .LBB251_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 |
| ; GFX90A-NEXT: v_pk_max_f16 v0, v0, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB251_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_pk_max_f16 v3, v0, v0 |
| ; GFX950-NEXT: .LBB251_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_max_f16 v0, v1, v1 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_max_f16 v0, v0, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB251_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmax_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v2, v2 |
| ; GFX90A-NEXT: .LBB252_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_pk_max_f16 v1, v5, v5 |
| ; GFX90A-NEXT: v_pk_max_f16 v4, v1, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB252_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_pk_max_f16 v2, v2, v2 |
| ; GFX950-NEXT: .LBB252_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_pk_max_f16 v1, v5, v5 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_max_f16 v4, v1, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB252_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmin_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v3, v0, v0 |
| ; GFX90A-NEXT: .LBB253_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 |
| ; GFX90A-NEXT: v_pk_min_f16 v0, v0, v3 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB253_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_pk_max_f16 v3, v0, v0 |
| ; GFX950-NEXT: .LBB253_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_max_f16 v0, v1, v1 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_min_f16 v0, v0, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB253_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmin_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 |
| ; GFX90A-NEXT: v_pk_max_f16 v2, v2, v2 |
| ; GFX90A-NEXT: .LBB254_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX90A-NEXT: v_pk_max_f16 v1, v5, v5 |
| ; GFX90A-NEXT: v_pk_min_f16 v4, v1, v2 |
| ; GFX90A-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB254_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_pk_max_f16 v2, v2, v2 |
| ; GFX950-NEXT: .LBB254_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_pk_max_f16 v1, v5, v5 |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_pk_min_f16 v4, v1, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB254_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmaximum_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: .LBB255_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v3 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v1, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v4, v0, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v0, v4, v0, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v0, v0, v5, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB255_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB255_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_maximum3_f16 v0, v1, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB255_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fmaximum_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB256_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_pk_max_f16 v3, v5, v1 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v5, v1 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v5, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v2, v3, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v3, v2, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v4, v3, v4, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB256_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB256_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_pk_maximum3_f16 v4, v5, v2, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB256_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fminimum_v2f16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2f16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: .LBB257_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_pk_min_f16 v0, v1, v3 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v1, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v5, v4, v0, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v0, v4, v0, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v0, v0, v5, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB257_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2f16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v3, a0 |
| ; GFX950-NEXT: .LBB257_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_pk_minimum3_f16 v0, v1, v3, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB257_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x half> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fminimum_v2f16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2f16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7e00 |
| ; GFX90A-NEXT: s_mov_b32 s8, 0x5040100 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v1 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: .LBB258_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_pk_min_f16 v3, v5, v1 |
| ; GFX90A-NEXT: v_cmp_o_f16_sdwa vcc, v5, v1 src0_sel:WORD_1 src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_cmp_o_f16_e64 s[4:5], v5, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v2, v3, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_sdwa v3, v2, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 |
| ; GFX90A-NEXT: v_perm_b32 v4, v3, v4, s8 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB258_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2f16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v1, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: .LBB258_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v1 |
| ; GFX950-NEXT: v_pk_minimum3_f16 v4, v5, v2, v2 |
| ; GFX950-NEXT: global_atomic_cmpswap v1, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v1, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB258_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x half>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x half> asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x half> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x half> %result) |
| ret void |
| } |
| |
| ;--------------------------------------------------------------------- |
| ; other atomic operations on v2bf16, a_a and av_av cases using saddr |
| ;--------------------------------------------------------------------- |
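| ; Note (observation from the checks below, not a general ISA claim): the gfx950 |
| ; fadd cases lower directly to global_atomic_pk_add_bf16, while the gfx90a fadd |
| ; cases and the remaining v2bf16 operations expand into global_atomic_cmpswap |
| ; loops that widen to f32, operate there, and convert back to bf16. |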
| |
| define void @global_atomic_fadd_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB259_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_add_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: v_add_f32_e32 v5, v5, v4 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB259_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_nop 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v1, a0 |
| ; GFX950-NEXT: global_atomic_pk_add_bf16 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fadd_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fadd_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB260_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_add_f32_e32 v3, v3, v1 |
| ; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v4, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v4 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v4, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v3, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB260_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fadd_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v1 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: global_atomic_pk_add_bf16 v0, v0, v1, s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fadd ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fsub_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB261_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_sub_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v4 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB261_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX950-NEXT: .LBB261_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff0000, v1 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; GFX950-NEXT: v_sub_f32_e32 v0, v0, v3 |
| ; GFX950-NEXT: v_sub_f32_e32 v5, v5, v4 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v0 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB261_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
| define void @global_atomic_fsub_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fsub_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB262_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v1 |
| ; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v4, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v4 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v4, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v3, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB262_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fsub_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v3, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX950-NEXT: .LBB262_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX950-NEXT: v_sub_f32_e32 v3, v3, v1 |
| ; GFX950-NEXT: v_sub_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v4, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB262_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fsub ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
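| ; Input and result use AGPR; pointer is a uniform inreg SGPR base. |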
| define void @global_atomic_fmax_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB263_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v5, v5, v4 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB263_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX950-NEXT: .LBB263_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff0000, v1 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; GFX950-NEXT: v_max_f32_e32 v0, v0, v3 |
| ; GFX950-NEXT: v_max_f32_e32 v5, v5, v4 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v0 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB263_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
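| ; Input and result use an AV register ('VA' constraint); pointer is a uniform inreg SGPR base. |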
| define void @global_atomic_fmax_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmax_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB264_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_max_f32_e32 v3, v3, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v4, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v4 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v4, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v3, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB264_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmax_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v3, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX950-NEXT: .LBB264_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX950-NEXT: v_max_f32_e32 v3, v3, v1 |
| ; GFX950-NEXT: v_max_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v4, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB264_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmax ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
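| ; Input and result use AGPR; pointer is a uniform inreg SGPR base. |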
| define void @global_atomic_fmin_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB265_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_min_f32_e32 v0, v0, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v5, v5, v4 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB265_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX950-NEXT: .LBB265_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff0000, v1 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; GFX950-NEXT: v_min_f32_e32 v0, v0, v3 |
| ; GFX950-NEXT: v_min_f32_e32 v5, v5, v4 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v0 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB265_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
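| ; Input and result use an AV register ('VA' constraint); pointer is a uniform inreg SGPR base. |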
| define void @global_atomic_fmin_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmin_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v3, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v2 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB266_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_min_f32_e32 v3, v3, v1 |
| ; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2 |
| ; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v8, v4, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 |
| ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v4 |
| ; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 |
| ; GFX90A-NEXT: v_add3_u32 v8, v8, v4, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v4, v3, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB266_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmin_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v3, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX950-NEXT: .LBB266_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX950-NEXT: v_min_f32_e32 v3, v3, v1 |
| ; GFX950-NEXT: v_min_f32_e32 v4, v4, v2 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v4, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB266_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmin ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
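| ; Input and result use AGPR; pointer is a uniform inreg SGPR base. |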
| define void @global_atomic_fmaximum_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB267_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v7, v0, v3 |
| ; GFX90A-NEXT: v_max_f32_e32 v8, v6, v5 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v5 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v0, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v4, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v4, v8, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v7, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v6, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB267_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX950-NEXT: .LBB267_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff0000, v1 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; GFX950-NEXT: v_maximum3_f32 v0, v0, v3, v3 |
| ; GFX950-NEXT: v_maximum3_f32 v5, v5, v4, v4 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v0 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB267_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
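| ; Input and result use an AV register ('VA' constraint); pointer is a uniform inreg SGPR base. |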
| define void @global_atomic_fmaximum_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fmaximum_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v4, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v3 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB268_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_max_f32_e32 v7, v4, v1 |
| ; GFX90A-NEXT: v_max_f32_e32 v8, v6, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v4, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v2, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB268_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fmaximum_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v3, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX950-NEXT: .LBB268_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX950-NEXT: v_maximum3_f32 v3, v3, v1, v1 |
| ; GFX950-NEXT: v_maximum3_f32 v4, v4, v2, v2 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v4, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB268_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fmaximum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
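| ; Input and result use AGPR; pointer is a uniform inreg SGPR base. |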
| define void @global_atomic_fminimum_v2bf16_saddr_ret_a_a(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2bf16_saddr_ret_a_a: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: global_load_dword v1, v2, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB269_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 16, v1 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v1 |
| ; GFX90A-NEXT: v_min_f32_e32 v7, v0, v3 |
| ; GFX90A-NEXT: v_min_f32_e32 v8, v6, v5 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v5 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v0, v3 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v4, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v4, v8, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v7, v0, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v0 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v0, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v0, v0 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v0, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v0, v6, v0, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB269_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use a0 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2bf16_saddr_ret_a_a: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX950-NEXT: global_load_dword v1, v2, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_accvgpr_read_b32 v0, a0 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v0 |
| ; GFX950-NEXT: .LBB269_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_and_b32_e32 v0, 0xffff0000, v1 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; GFX950-NEXT: v_minimum3_f32 v0, v0, v3, v3 |
| ; GFX950-NEXT: v_minimum3_f32 v5, v5, v4, v4 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v0 |
| ; GFX950-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 |
| ; GFX950-NEXT: v_accvgpr_write_b32 a0, v0 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: v_mov_b32_e32 v1, v0 |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB269_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use a0 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=a"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "a"(<2 x bfloat> %result) |
| ret void |
| } |
| |
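| ; Input and result use an AV register ('VA' constraint); pointer is a uniform inreg SGPR base. |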
| define void @global_atomic_fminimum_v2bf16_saddr_ret_av_av(ptr addrspace(1) inreg %ptr) #0 { |
| ; GFX90A-LABEL: global_atomic_fminimum_v2bf16_saddr_ret_av_av: |
| ; GFX90A: ; %bb.0: |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: global_load_dword v4, v0, s[16:17] offset:40 |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; def v3 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v3 |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0x7fc00000 |
| ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff |
| ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 |
| ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 |
| ; GFX90A-NEXT: .LBB270_1: ; %atomicrmw.start |
| ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v4 |
| ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX90A-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 |
| ; GFX90A-NEXT: v_min_f32_e32 v7, v4, v1 |
| ; GFX90A-NEXT: v_min_f32_e32 v8, v6, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e32 vcc, v6, v3 |
| ; GFX90A-NEXT: v_cmp_o_f32_e64 s[4:5], v4, v1 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v2, v7, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc |
| ; GFX90A-NEXT: v_bfe_u32 v7, v4, 16, 1 |
| ; GFX90A-NEXT: v_bfe_u32 v9, v6, 16, 1 |
| ; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v4 |
| ; GFX90A-NEXT: v_or_b32_e32 v10, 0x400000, v6 |
| ; GFX90A-NEXT: v_add3_u32 v7, v7, v4, s8 |
| ; GFX90A-NEXT: v_add3_u32 v9, v9, v6, s8 |
| ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 |
| ; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 |
| ; GFX90A-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[4:5] |
| ; GFX90A-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc |
| ; GFX90A-NEXT: v_perm_b32 v4, v6, v4, s9 |
| ; GFX90A-NEXT: global_atomic_cmpswap v4, v0, v[4:5], s[16:17] offset:40 glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5 |
| ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] |
| ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: s_cbranch_execnz .LBB270_1 |
| ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] |
| ; GFX90A-NEXT: ;;#ASMSTART |
| ; GFX90A-NEXT: ; use v4 |
| ; GFX90A-NEXT: ;;#ASMEND |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| ; |
| ; GFX950-LABEL: global_atomic_fminimum_v2bf16_saddr_ret_av_av: |
| ; GFX950: ; %bb.0: |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX950-NEXT: global_load_dword v3, v0, s[0:1] offset:40 |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; def v2 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_mov_b64 s[2:3], 0 |
| ; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 |
| ; GFX950-NEXT: .LBB270_1: ; %atomicrmw.start |
| ; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_mov_b32_e32 v5, v3 |
| ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 |
| ; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; GFX950-NEXT: v_minimum3_f32 v3, v3, v1, v1 |
| ; GFX950-NEXT: v_minimum3_f32 v4, v4, v2, v2 |
| ; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v4, v3 |
| ; GFX950-NEXT: global_atomic_cmpswap v3, v0, v[4:5], s[0:1] offset:40 sc0 |
| ; GFX950-NEXT: s_waitcnt vmcnt(0) |
| ; GFX950-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5 |
| ; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3] |
| ; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: s_cbranch_execnz .LBB270_1 |
| ; GFX950-NEXT: ; %bb.2: ; %atomicrmw.end |
| ; GFX950-NEXT: s_or_b64 exec, exec, s[2:3] |
| ; GFX950-NEXT: ;;#ASMSTART |
| ; GFX950-NEXT: ; use v3 |
| ; GFX950-NEXT: ;;#ASMEND |
| ; GFX950-NEXT: s_setpc_b64 s[30:31] |
| %gep.0 = getelementptr inbounds [512 x <2 x bfloat>], ptr addrspace(1) %ptr, i64 0, i64 10 |
| %data = call <2 x bfloat> asm "; def $0", "=^VA"() |
| %result = atomicrmw fminimum ptr addrspace(1) %gep.0, <2 x bfloat> %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0 |
| call void asm "; use $0", "^VA"(<2 x bfloat> %result) |
| ret void |
| } |
| |
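| ; "amdgpu-waves-per-eu"="10,10" requests maximum occupancy, tightening the per-wave VGPR/AGPR budget for these tests. |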
| attributes #0 = { nounwind "amdgpu-waves-per-eu"="10,10" } |
| |
| !0 = !{} |
| |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; CHECK: {{.*}} |