; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel < %s -march=amdgcn -mcpu=gfx90a -verify-machineinstrs | FileCheck %s -check-prefix=GFX90A
| |
| declare double @llvm.amdgcn.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i1) |
| declare double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double, <4 x i32>, i32, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double, <4 x i32>, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double, <4 x i32>, i32, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double, <4 x i32>, i32, i32, i32 immarg) |
| declare double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| declare double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| declare double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| declare double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data) |
| declare double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data) |
| declare double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data) |
| declare double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* nocapture, double, i32, i32, i1) |
| |
| define amdgpu_kernel void @buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: buffer_atomic_add_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 offen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: buffer_atomic_add_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: buffer_atomic_add_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 4 offen glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i1 1) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_add_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 offen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_add_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: raw_buffer_atomic_add_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 4 offen glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_add_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 idxen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @struct_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_add_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: struct_buffer_atomic_add_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 idxen offset:4 glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_min_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 0 offen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_min_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: raw_buffer_atomic_min_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 4 offen glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_min_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 0 idxen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @struct_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_min_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: struct_buffer_atomic_min_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 0 idxen offset:4 glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_max_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 0 offen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @raw_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: raw_buffer_atomic_max_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @raw_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: raw_buffer_atomic_max_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 4 offen glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_max_noret_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s8 |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 0 idxen glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| ret void |
| } |
| |
| define amdgpu_ps void @struct_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) { |
| ; GFX90A-LABEL: struct_buffer_atomic_max_rtn_f64: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0) |
| store double %ret, double* undef |
| ret void |
| } |
| |
| define amdgpu_kernel void @struct_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) { |
| ; GFX90A-LABEL: struct_buffer_atomic_max_rtn_f64_off4_slc: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 |
| ; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c |
| ; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s10 |
| ; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 0 idxen offset:4 glc slc |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2) |
| store double %ret, double addrspace(1)* %out, align 8 |
| ret void |
| } |
| |
| define amdgpu_kernel void @global_atomic_fadd_f64_noret(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret void |
| } |
| |
| define amdgpu_kernel void @global_atomic_fmin_f64_noret(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret void |
| } |
| |
| define amdgpu_kernel void @global_atomic_fmax_f64_noret(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret void |
| } |
| |
| define double @global_atomic_fadd_f64_rtn(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fadd_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define double @global_atomic_fmax_f64_rtn(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fmax_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define double @global_atomic_fmin_f64_rtn(double addrspace(1)* %ptr, double %data) { |
| ; GFX90A-LABEL: global_atomic_fmin_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define amdgpu_kernel void @flat_atomic_fadd_f64_noret(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fadd_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data) |
| ret void |
| } |
| |
| define double @flat_atomic_fadd_f64_rtn(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fadd_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define amdgpu_kernel void @flat_atomic_fmin_f64_noret(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fmin_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: flat_atomic_min_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data) |
| ret void |
| } |
| |
| define double @flat_atomic_fmin_f64_rtn(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fmin_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: flat_atomic_min_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define amdgpu_kernel void @flat_atomic_fmax_f64_noret(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fmax_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] |
| ; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: flat_atomic_max_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data) |
| ret void |
| } |
| |
| define double @flat_atomic_fmax_f64_rtn(double* %ptr, double %data) { |
| ; GFX90A-LABEL: flat_atomic_fmax_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: flat_atomic_max_f64 v[0:1], v[0:1], v[2:3] glc |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data) |
| ret double %ret |
| } |
| |
| define amdgpu_kernel void @local_atomic_fadd_f64_noret(double addrspace(3)* %ptr, double %data) { |
| ; GFX90A-LABEL: local_atomic_fadd_f64_noret: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dword s4, s[0:1], 0x24 |
| ; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s4 |
| ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] |
| ; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v2, v[0:1] |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0) |
| ret void |
| } |
| |
| define double @local_atomic_fadd_f64_rtn(double addrspace(3)* %ptr, double %data) { |
| ; GFX90A-LABEL: local_atomic_fadd_f64_rtn: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v4, v1 |
| ; GFX90A-NEXT: v_mov_b32_e32 v5, v2 |
| ; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[4:5] |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0) |
| ret double %ret |
| } |
| |
| define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(double addrspace(3)* %ptr) { |
| ; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24 |
| ; GFX90A-NEXT: v_mov_b32_e32 v0, 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, s0 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v2, v[0:1] |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: s_endpgm |
| main_body: |
| %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst |
| ret void |
| } |
| |
| define double @local_atomic_fadd_f64_rtn_pat(double addrspace(3)* %ptr, double %data) { |
| ; GFX90A-LABEL: local_atomic_fadd_f64_rtn_pat: |
| ; GFX90A: ; %bb.0: ; %main_body |
| ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX90A-NEXT: v_mov_b32_e32 v2, 0 |
| ; GFX90A-NEXT: v_mov_b32_e32 v3, 0x40100000 |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] |
| ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) |
| ; GFX90A-NEXT: s_setpc_b64 s[30:31] |
| main_body: |
| %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst |
| ret double %ret |
| } |