; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "store" --filter-out "wait" --version 6
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck %s -check-prefix=SI
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck %s -check-prefix=SI
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=fiji < %s | FileCheck %s -check-prefix=VI
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GFX9,GFX9-SDAG
; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GFX9,GFX9-GISEL

; TODO: fneg/fabs folding for GlobalISel

; Both operands are SGPR kernel arguments; one must be copied to a VGPR.
define amdgpu_kernel void @s_cvt_pknorm_i16_f32(ptr addrspace(1) %out, float %x, float %y) #0 {
; SI-LABEL: s_cvt_pknorm_i16_f32:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, -1
; SI: v_mov_b32_e32 v0, s3
; SI: s_mov_b32 s4, s0
; SI: s_mov_b32 s5, s1
; SI: v_cvt_pknorm_i16_f32_e32 v0, s2, v0
; SI: s_endpgm
;
; VI-LABEL: s_cvt_pknorm_i16_f32:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: v_mov_b32_e32 v0, s3
; VI: v_cvt_pknorm_i16_f32 v2, s2, v0
; VI: v_mov_b32_e32 v0, s0
; VI: v_mov_b32_e32 v1, s1
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: s_cvt_pknorm_i16_f32:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-SDAG: v_mov_b32_e32 v0, 0
; GFX9-SDAG: v_mov_b32_e32 v1, s3
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, s2, v1
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: s_cvt_pknorm_i16_f32:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-GISEL: v_mov_b32_e32 v1, 0
; GFX9-GISEL: v_mov_b32_e32 v0, s3
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v0, s2, v0
; GFX9-GISEL: s_endpgm
  %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %x, float %y)
  %r = bitcast <2 x i16> %result to i32
  store i32 %r, ptr addrspace(1) %out
  ret void
}

; The same SGPR is used for both operands, so the VOP3 (e64) form is required on SI.
define amdgpu_kernel void @s_cvt_pknorm_i16_samereg_f32(ptr addrspace(1) %out, float %x) #0 {
; SI-LABEL: s_cvt_pknorm_i16_samereg_f32:
; SI: ; %bb.0:
; SI: s_load_dword s6, s[4:5], 0xb
; SI: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI: s_mov_b32 s3, 0xf000
; SI: s_mov_b32 s2, -1
; SI: v_cvt_pknorm_i16_f32_e64 v0, s6, s6
; SI: s_endpgm
;
; VI-LABEL: s_cvt_pknorm_i16_samereg_f32:
; VI: ; %bb.0:
; VI: s_load_dword s2, s[4:5], 0x2c
; VI: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI: v_cvt_pknorm_i16_f32 v2, s2, s2
; VI: v_mov_b32_e32 v0, s0
; VI: v_mov_b32_e32 v1, s1
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: s_cvt_pknorm_i16_samereg_f32:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dword s2, s[4:5], 0x2c
; GFX9-SDAG: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-SDAG: v_mov_b32_e32 v0, 0
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, s2, s2
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: s_cvt_pknorm_i16_samereg_f32:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dword s2, s[4:5], 0x2c
; GFX9-GISEL: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-GISEL: v_mov_b32_e32 v1, 0
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v0, s2, s2
; GFX9-GISEL: s_endpgm
  %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %x, float %x)
  %r = bitcast <2 x i16> %result to i32
  store i32 %r, ptr addrspace(1) %out
  ret void
}

; Both operands are VGPRs loaded from memory (per-lane addresses via workitem id).
define amdgpu_kernel void @v_cvt_pknorm_i16_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[10:11], s[6:7]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e32 v2, v2, v3
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI: v_lshlrev_b32_e32 v4, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_mov_b32_e32 v3, s5
; VI: v_add_u32_e32 v2, vcc, s4, v4
; VI: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI: flat_load_dword v5, v[0:1] glc
; VI: flat_load_dword v2, v[2:3] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, v5, v2
; VI: s_endpgm
;
; GFX9-LABEL: v_cvt_pknorm_i16_f32:
; GFX9: ; %bb.0:
; GFX9: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9: v_lshlrev_b32_e32 v0, 2, v0
; GFX9: global_load_dword v1, v0, s[2:3] glc
; GFX9: global_load_dword v2, v0, s[6:7] glc
; GFX9: v_cvt_pknorm_i16_f32 v1, v1, v2
; GFX9: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds float, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %b = load volatile float, ptr addrspace(1) %b.gep
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %a, float %b)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; VGPR first operand, inline-immediate (1.0) second operand.
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_reg_imm(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_reg_imm:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e64 v2, v2, 1.0
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_reg_imm:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: v_lshlrev_b32_e32 v2, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v2
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: flat_load_dword v3, v[0:1] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v2
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, v3, 1.0
; VI: s_endpgm
;
; GFX9-LABEL: v_cvt_pknorm_i16_f32_reg_imm:
; GFX9: ; %bb.0:
; GFX9: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9: v_lshlrev_b32_e32 v0, 2, v0
; GFX9: global_load_dword v1, v0, s[2:3] glc
; GFX9: v_cvt_pknorm_i16_f32 v1, v1, 1.0
; GFX9: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %a, float 1.0)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; Inline-immediate (1.0) first operand, VGPR second operand.
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_imm_reg(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_imm_reg:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e32 v2, 1.0, v2
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_imm_reg:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: v_lshlrev_b32_e32 v2, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v2
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: flat_load_dword v3, v[0:1] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v2
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, 1.0, v3
; VI: s_endpgm
;
; GFX9-LABEL: v_cvt_pknorm_i16_f32_imm_reg:
; GFX9: ; %bb.0:
; GFX9: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9: v_lshlrev_b32_e32 v0, 2, v0
; GFX9: global_load_dword v1, v0, s[2:3] glc
; GFX9: v_cvt_pknorm_i16_f32 v1, 1.0, v1
; GFX9: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float 1.0, float %a)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; fneg on the low operand: SDAG folds it into a source modifier (-v);
; GlobalISel currently emits a separate v_max_f32 negate (see TODO at top of file).
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_fneg_lo(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_fneg_lo:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[10:11], s[6:7]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e64 v2, -v2, v3
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_fneg_lo:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI: v_lshlrev_b32_e32 v4, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_mov_b32_e32 v3, s5
; VI: v_add_u32_e32 v2, vcc, s4, v4
; VI: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI: flat_load_dword v5, v[0:1] glc
; VI: flat_load_dword v2, v[2:3] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, -v5, v2
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: v_cvt_pknorm_i16_f32_fneg_lo:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-SDAG: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-SDAG: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-SDAG: global_load_dword v1, v0, s[2:3] glc
; GFX9-SDAG: global_load_dword v2, v0, s[6:7] glc
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, -v1, v2
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: v_cvt_pknorm_i16_f32_fneg_lo:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-GISEL: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-GISEL: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-GISEL: global_load_dword v1, v0, s[2:3] glc
; GFX9-GISEL: global_load_dword v2, v0, s[6:7] glc
; GFX9-GISEL: v_max_f32_e64 v1, -v1, -v1
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v1, v1, v2
; GFX9-GISEL: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds float, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %b = load volatile float, ptr addrspace(1) %b.gep
  %neg.a = fsub float -0.0, %a
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %neg.a, float %b)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; fneg on the high operand: SDAG folds it into a source modifier (-v);
; GlobalISel currently emits a separate v_max_f32 negate (see TODO at top of file).
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_fneg_hi(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_fneg_hi:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[10:11], s[6:7]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e64 v2, v2, -v3
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_fneg_hi:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI: v_lshlrev_b32_e32 v4, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_mov_b32_e32 v3, s5
; VI: v_add_u32_e32 v2, vcc, s4, v4
; VI: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI: flat_load_dword v5, v[0:1] glc
; VI: flat_load_dword v2, v[2:3] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, v5, -v2
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: v_cvt_pknorm_i16_f32_fneg_hi:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-SDAG: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-SDAG: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-SDAG: global_load_dword v1, v0, s[2:3] glc
; GFX9-SDAG: global_load_dword v2, v0, s[6:7] glc
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, v1, -v2
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: v_cvt_pknorm_i16_f32_fneg_hi:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-GISEL: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-GISEL: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-GISEL: global_load_dword v1, v0, s[2:3] glc
; GFX9-GISEL: global_load_dword v2, v0, s[6:7] glc
; GFX9-GISEL: v_max_f32_e64 v2, -v2, -v2
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v1, v1, v2
; GFX9-GISEL: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds float, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %b = load volatile float, ptr addrspace(1) %b.gep
  %neg.b = fsub float -0.0, %b
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %a, float %neg.b)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; fneg on both operands: SDAG folds both into source modifiers;
; GlobalISel currently emits two separate v_max_f32 negates (see TODO at top of file).
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_fneg_lo_hi(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_fneg_lo_hi:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[10:11], s[6:7]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e64 v2, -v2, -v3
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_fneg_lo_hi:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI: v_lshlrev_b32_e32 v4, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_mov_b32_e32 v3, s5
; VI: v_add_u32_e32 v2, vcc, s4, v4
; VI: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI: flat_load_dword v5, v[0:1] glc
; VI: flat_load_dword v2, v[2:3] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, -v5, -v2
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: v_cvt_pknorm_i16_f32_fneg_lo_hi:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-SDAG: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-SDAG: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-SDAG: global_load_dword v1, v0, s[2:3] glc
; GFX9-SDAG: global_load_dword v2, v0, s[6:7] glc
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, -v1, -v2
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: v_cvt_pknorm_i16_f32_fneg_lo_hi:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-GISEL: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-GISEL: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-GISEL: global_load_dword v1, v0, s[2:3] glc
; GFX9-GISEL: global_load_dword v2, v0, s[6:7] glc
; GFX9-GISEL: v_max_f32_e64 v1, -v1, -v1
; GFX9-GISEL: v_max_f32_e64 v2, -v2, -v2
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v1, v1, v2
; GFX9-GISEL: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds float, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %b = load volatile float, ptr addrspace(1) %b.gep
  %neg.a = fsub float -0.0, %a
  %neg.b = fsub float -0.0, %b
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %neg.a, float %neg.b)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; fneg(fabs) on the low operand and fneg on the high operand: SDAG folds both
; into -| | and - source modifiers; GlobalISel currently emits separate
; v_max_f32 instructions for the modifiers (see TODO at top of file).
define amdgpu_kernel void @v_cvt_pknorm_i16_f32_fneg_fabs_lo_fneg_hi(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; SI-LABEL: v_cvt_pknorm_i16_f32_fneg_fabs_lo_fneg_hi:
; SI: ; %bb.0:
; SI: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI: s_mov_b32 s7, 0xf000
; SI: s_mov_b32 s6, 0
; SI: v_lshlrev_b32_e32 v0, 2, v0
; SI: s_mov_b64 s[4:5], s[2:3]
; SI: v_mov_b32_e32 v1, 0
; SI: s_mov_b64 s[10:11], s[6:7]
; SI: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
; SI: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 glc
; SI: s_mov_b64 s[2:3], s[6:7]
; SI: v_cvt_pknorm_i16_f32_e64 v2, -|v2|, -v3
; SI: s_endpgm
;
; VI-LABEL: v_cvt_pknorm_i16_f32_fneg_fabs_lo_fneg_hi:
; VI: ; %bb.0:
; VI: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI: v_lshlrev_b32_e32 v4, 2, v0
; VI: v_mov_b32_e32 v1, s3
; VI: v_add_u32_e32 v0, vcc, s2, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_mov_b32_e32 v3, s5
; VI: v_add_u32_e32 v2, vcc, s4, v4
; VI: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI: flat_load_dword v5, v[0:1] glc
; VI: flat_load_dword v2, v[2:3] glc
; VI: v_mov_b32_e32 v1, s1
; VI: v_add_u32_e32 v0, vcc, s0, v4
; VI: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI: v_cvt_pknorm_i16_f32 v2, -|v5|, -v2
; VI: s_endpgm
;
; GFX9-SDAG-LABEL: v_cvt_pknorm_i16_f32_fneg_fabs_lo_fneg_hi:
; GFX9-SDAG: ; %bb.0:
; GFX9-SDAG: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-SDAG: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-SDAG: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-SDAG: global_load_dword v1, v0, s[2:3] glc
; GFX9-SDAG: global_load_dword v2, v0, s[6:7] glc
; GFX9-SDAG: v_cvt_pknorm_i16_f32 v1, -|v1|, -v2
; GFX9-SDAG: s_endpgm
;
; GFX9-GISEL-LABEL: v_cvt_pknorm_i16_f32_fneg_fabs_lo_fneg_hi:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-GISEL: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-GISEL: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-GISEL: global_load_dword v1, v0, s[2:3] glc
; GFX9-GISEL: global_load_dword v2, v0, s[6:7] glc
; GFX9-GISEL: v_max_f32_e64 v1, -|v1|, -|v1|
; GFX9-GISEL: v_max_f32_e64 v2, -v2, -v2
; GFX9-GISEL: v_cvt_pknorm_i16_f32 v1, v1, v2
; GFX9-GISEL: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds float, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load volatile float, ptr addrspace(1) %a.gep
  %b = load volatile float, ptr addrspace(1) %b.gep
  %fabs.a = call float @llvm.fabs.f32(float %a)
  %neg.fabs.a = fsub float -0.0, %fabs.a
  %neg.b = fsub float -0.0, %b
  %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %neg.fabs.a, float %neg.b)
  %r = bitcast <2 x i16> %cvt to i32
  store i32 %r, ptr addrspace(1) %out.gep
  ret void
}

; Intrinsic declarations and shared function attributes.
declare <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float, float) #1
declare float @llvm.fabs.f32(float) #1
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }