| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -verify-machineinstrs < %s | FileCheck %s |
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -global-isel -verify-machineinstrs < %s | FileCheck %s |
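
; Test that floating-point operations on uniform (SGPR) values select the
; scalar ALU forms (s_add_f32, s_fmac_f16, etc.) available on gfx1150 rather
; than their VALU equivalents.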
| |
| define amdgpu_vs float @fadd_f32(float inreg %a, float inreg %b) { |
| ; CHECK-LABEL: fadd_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_add_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %add = fadd float %a, %b |
| ret float %add |
| } |
| |
| define amdgpu_vs float @fsub_f32(float inreg %a, float inreg %b) { |
| ; CHECK-LABEL: fsub_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_sub_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %sub = fsub float %a, %b |
| ret float %sub |
| } |
| |
| define amdgpu_vs float @fmul_f32(float inreg %a, float inreg %b) { |
| ; CHECK-LABEL: fmul_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_mul_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %mul = fmul float %a, %b |
| ret float %mul |
| } |
| |
| define amdgpu_vs float @fmin_f32(float inreg %a, float inreg %b) { |
| ; CHECK-LABEL: fmin_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_min_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %min = call float @llvm.minnum.f32(float %a, float %b) |
| ret float %min |
| } |
| |
| define amdgpu_vs float @fmax_f32(float inreg %a, float inreg %b) { |
| ; CHECK-LABEL: fmax_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_max_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %max = call float @llvm.maxnum.f32(float %a, float %b) |
| ret float %max |
| } |
| |
| define amdgpu_vs half @fadd_f16(half inreg %a, half inreg %b) { |
| ; CHECK-LABEL: fadd_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_add_f16 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %add = fadd half %a, %b |
| ret half %add |
| } |
| |
| define amdgpu_vs half @fsub_f16(half inreg %a, half inreg %b) { |
| ; CHECK-LABEL: fsub_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_sub_f16 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %sub = fsub half %a, %b |
| ret half %sub |
| } |
| |
| define amdgpu_vs half @fmul_f16(half inreg %a, half inreg %b) { |
| ; CHECK-LABEL: fmul_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_mul_f16 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %mul = fmul half %a, %b |
| ret half %mul |
| } |
| |
| define amdgpu_vs half @fmin_f16(half inreg %a, half inreg %b) { |
| ; CHECK-LABEL: fmin_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_min_f16 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %min = call half @llvm.minnum.f16(half %a, half %b) |
| ret half %min |
| } |
| |
| define amdgpu_vs half @fmax_f16(half inreg %a, half inreg %b) { |
| ; CHECK-LABEL: fmax_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_max_f16 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %max = call half @llvm.maxnum.f16(half %a, half %b) |
| ret half %max |
| } |
| |
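; llvm.amdgcn.cvt.pkrtz with uniform inputs should select the scalar
; s_cvt_pk_rtz_f16_f32 form.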
| define amdgpu_vs <2 x half> @s_cvt_pkrtz_v2f16_f32(float inreg %x, float inreg %y) { |
| ; CHECK-LABEL: s_cvt_pkrtz_v2f16_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_cvt_pk_rtz_f16_f32 s0, s0, s1 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y) |
| ret <2 x half> %result |
| } |
| |
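; s_fmac_f32/s_fmac_f16 tie the destination to the addend (src2), so the fma
; addend operand is placed in the result register.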
| define amdgpu_vs float @fmac_f32(float inreg %a, float inreg %b, float inreg %c) { |
| ; CHECK-LABEL: fmac_f32: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_fmac_f32 s0, s1, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %res = call float @llvm.fma.f32(float %b, float %c, float %a) |
| ret float %res |
| } |
| |
; Check selection of mov + fmac when src2 of the fmac has a later use.
| define amdgpu_vs float @fmac_f32_with_mov(float inreg %a, float inreg %b, float inreg %c) { |
| ; CHECK-LABEL: fmac_f32_with_mov: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_mov_b32 s3, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) |
| ; CHECK-NEXT: s_fmac_f32 s3, s0, s1 |
| ; CHECK-NEXT: s_add_f32 s0, s3, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %fma = call float @llvm.fma.f32(float %a, float %b, float %c) |
| %res = fadd float %fma, %c |
| ret float %res |
| } |
| |
| define amdgpu_vs half @fmac_f16(half inreg %a, half inreg %b, half inreg %c) { |
| ; CHECK-LABEL: fmac_f16: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_fmac_f16 s0, s1, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %res = call half @llvm.fma.f16(half %b, half %c, half %a) |
| ret half %res |
| } |
| |
; Check selection of mov + fmac when src2 of the fmac has a later use.
| define amdgpu_vs half @fmac_f16_with_mov(half inreg %a, half inreg %b, half inreg %c) { |
| ; CHECK-LABEL: fmac_f16_with_mov: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_mov_b32 s3, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) |
| ; CHECK-NEXT: s_fmac_f16 s3, s0, s1 |
| ; CHECK-NEXT: s_add_f16 s0, s3, s2 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| %fma = call half @llvm.fma.f16(half %a, half %b, half %c) |
| %res = fadd half %fma, %c |
| ret half %res |
| } |
| |
; Regression test for a crash in SIFoldOperands.
| define amdgpu_ps float @_amdgpu_ps_main() { |
| ; CHECK-LABEL: _amdgpu_ps_main: |
| ; CHECK: ; %bb.0: ; %bb |
| ; CHECK-NEXT: s_mov_b32 s0, 0 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) |
| ; CHECK-NEXT: s_mov_b32 s1, s0 |
| ; CHECK-NEXT: s_mov_b32 s2, s0 |
| ; CHECK-NEXT: s_mov_b32 s3, s0 |
| ; CHECK-NEXT: s_buffer_load_b64 s[0:1], s[0:3], 0x0 |
| ; CHECK-NEXT: s_waitcnt lgkmcnt(0) |
| ; CHECK-NEXT: s_fmamk_f32 s0, s1, 0x40800000, s0 |
| ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_3) |
| ; CHECK-NEXT: v_mov_b32_e32 v0, s0 |
| ; CHECK-NEXT: ; return to shader part epilog |
| bb: |
| %i = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> zeroinitializer, i32 0, i32 0) |
| %i1 = bitcast i32 %i to float |
| %i2 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> zeroinitializer, i32 4, i32 0) |
| %i3 = bitcast i32 %i2 to float |
| %i4 = fmul contract float %i3, 4.0 |
| %i5 = fadd contract float %i4, %i1 |
| ret float %i5 |
| } |
| |
| declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32 immarg) |
| declare float @llvm.minnum.f32(float, float) |
| declare float @llvm.maxnum.f32(float, float) |
| declare half @llvm.minnum.f16(half, half) |
| declare half @llvm.maxnum.f16(half, half) |
| declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) |
| declare float @llvm.fma.f32(float, float, float) nounwind readnone |
| declare half @llvm.fma.f16(half, half, half) nounwind readnone |