| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s |
| |
| ; Test that we use SimplifyDemandedBits on copysign's sign |
| ; operand. These are somewhat simplified extractions from fast pown |
| ; expansions. |
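;
; In every case the value feeding copysign's sign operand is known to be +0.0
; or -0.0 (only its sign bit can be nonzero), so the masking or conversion
; code producing it should simplify down to that single bit.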
| |
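; %y is forced odd by the 'or', so the expanded pown takes its sign from %x.
; Only the sign bit of %x feeds the copysign; the i16 and/bitcast chain should
; fold away and the sign should be inserted directly from %x with v_bfi_b32.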
| define half @test_pown_reduced_fast_f16_known_odd(half %x, i32 %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_f16_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v1, 1, v1 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1 |
| ; GFX9-NEXT: s_movk_i32 s4, 0x7fff |
| ; GFX9-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX9-NEXT: v_mul_f16_e64 v1, |v0|, v1 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v1, v0 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or i32 %y.arg, 1 |
| %fabs = call half @llvm.fabs.f16(half %x) |
| %pownI2F = sitofp i32 %y to half |
| %ylogx = fmul half %fabs, %pownI2F |
| %cast_x = bitcast half %x to i16 |
| %pow_sign = and i16 %cast_x, -32768 |
| %cast_sign = bitcast i16 %pow_sign to half |
| %pow_sign1 = call half @llvm.copysign.f16(half %ylogx, half %cast_sign) |
| ret half %pow_sign1 |
| } |
| |
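; Packed f16 version: the per-element sign masks should fold away and a single
; v_bfi_b32 with a 0x7fff7fff mask should insert both signs from %x at once.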
| define <2 x half> @test_pown_reduced_fast_v2f16_known_odd(<2 x half> %x, <2 x i32> %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_v2f16_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v2, 1, v2 |
| ; GFX9-NEXT: v_or_b32_e32 v1, 1, v1 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v2, v2 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1 |
| ; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v0 |
| ; GFX9-NEXT: s_mov_b32 s4, 0x7fff7fff |
| ; GFX9-NEXT: v_cvt_f16_f32_e32 v2, v2 |
| ; GFX9-NEXT: v_cvt_f16_f32_e32 v1, v1 |
| ; GFX9-NEXT: v_pack_b32_f16 v1, v1, v2 |
| ; GFX9-NEXT: v_pk_mul_f16 v1, v3, v1 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v1, v0 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or <2 x i32> %y.arg, <i32 1, i32 1> |
| %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %x) |
| %pownI2F = sitofp <2 x i32> %y to <2 x half> |
| %ylogx = fmul <2 x half> %fabs, %pownI2F |
| %cast_x = bitcast <2 x half> %x to <2 x i16> |
| %pow_sign = and <2 x i16> %cast_x, <i16 -32768, i16 -32768> |
| %cast_sign = bitcast <2 x i16> %pow_sign to <2 x half> |
| %pow_sign1 = call <2 x half> @llvm.copysign.v2f16(<2 x half> %ylogx, <2 x half> %cast_sign) |
| ret <2 x half> %pow_sign1 |
| } |
| |
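; f32 version: the sign bit is taken directly from %x with a 0x7fffffff
; (s_brev_b32 -2) mask, with no separate and of the sign operand.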
| define float @test_pown_reduced_fast_f32_known_odd(float %x, i32 %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_f32_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v1, 1, v1 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_mul_f32_e64 v1, |v0|, v1 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v1, v0 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or i32 %y.arg, 1 |
| %fabs = call float @llvm.fabs.f32(float %x) |
| %pownI2F = sitofp i32 %y to float |
| %ylogx = fmul float %fabs, %pownI2F |
| %cast_x = bitcast float %x to i32 |
| %pow_sign = and i32 %cast_x, -2147483648 |
| %cast_sign = bitcast i32 %pow_sign to float |
| %pow_sign1 = call float @llvm.copysign.f32(float %ylogx, float %cast_sign) |
| ret float %pow_sign1 |
| } |
| |
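; <2 x float> version: one v_bfi_b32 per element, both reusing the same
; 0x7fffffff mask in s4.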
| define <2 x float> @test_pown_reduced_fast_v2f32_known_odd(<2 x float> %x, <2 x i32> %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_v2f32_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v3, 1, v3 |
| ; GFX9-NEXT: v_or_b32_e32 v2, 1, v2 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v3 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v2, v2 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_mul_f32_e64 v3, |v1|, v3 |
| ; GFX9-NEXT: v_mul_f32_e64 v2, |v0|, v2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v2, v0 |
| ; GFX9-NEXT: v_bfi_b32 v1, s4, v3, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or <2 x i32> %y.arg, <i32 1, i32 1> |
| %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %x) |
| %pownI2F = sitofp <2 x i32> %y to <2 x float> |
| %ylogx = fmul <2 x float> %fabs, %pownI2F |
| %cast_x = bitcast <2 x float> %x to <2 x i32> |
| %pow_sign = and <2 x i32> %cast_x, <i32 -2147483648, i32 -2147483648> |
| %cast_sign = bitcast <2 x i32> %pow_sign to <2 x float> |
| %pow_sign1 = call <2 x float> @llvm.copysign.v2f32(<2 x float> %ylogx, <2 x float> %cast_sign) |
| ret <2 x float> %pow_sign1 |
| } |
| |
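; f64 version: only the high 32 bits hold the sign, so a single v_bfi_b32 on
; the high dword is needed; the low dword of the multiply result needs no sign
; insertion.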
| define double @test_pown_reduced_fast_f64_known_odd(double %x, i32 %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_f64_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v2, 1, v2 |
| ; GFX9-NEXT: v_cvt_f64_i32_e32 v[2:3], v2 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_mul_f64 v[2:3], |v[0:1]|, v[2:3] |
| ; GFX9-NEXT: v_bfi_b32 v1, s4, v3, v1 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, v2 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or i32 %y.arg, 1 |
| %fabs = call double @llvm.fabs.f64(double %x) |
| %pownI2F = sitofp i32 %y to double |
| %ylogx = fmul double %fabs, %pownI2F |
| %cast_x = bitcast double %x to i64 |
| %pow_sign = and i64 %cast_x, -9223372036854775808 |
| %cast_sign = bitcast i64 %pow_sign to double |
| %pow_sign1 = call double @llvm.copysign.f64(double %ylogx, double %cast_sign) |
| ret double %pow_sign1 |
| } |
| |
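; <2 x double> version: one v_bfi_b32 per element, applied to the high dwords
; only.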
| define <2 x double> @test_pown_reduced_fast_v2f64_known_odd(<2 x double> %x, <2 x i32> %y.arg) #0 { |
| ; GFX9-LABEL: test_pown_reduced_fast_v2f64_known_odd: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_or_b32_e32 v6, 1, v5 |
| ; GFX9-NEXT: v_or_b32_e32 v4, 1, v4 |
| ; GFX9-NEXT: v_cvt_f64_i32_e32 v[4:5], v4 |
| ; GFX9-NEXT: v_cvt_f64_i32_e32 v[6:7], v6 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_mul_f64 v[4:5], |v[0:1]|, v[4:5] |
| ; GFX9-NEXT: v_mul_f64 v[6:7], |v[2:3]|, v[6:7] |
| ; GFX9-NEXT: v_bfi_b32 v1, s4, v5, v1 |
| ; GFX9-NEXT: v_bfi_b32 v3, s4, v7, v3 |
| ; GFX9-NEXT: v_mov_b32_e32 v0, v4 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, v6 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = or <2 x i32> %y.arg, <i32 1, i32 1> |
| %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %x) |
| %pownI2F = sitofp <2 x i32> %y to <2 x double> |
| %ylogx = fmul <2 x double> %fabs, %pownI2F |
| %cast_x = bitcast <2 x double> %x to <2 x i64> |
| %pow_sign = and <2 x i64> %cast_x, <i64 -9223372036854775808, i64 -9223372036854775808> |
| %cast_sign = bitcast <2 x i64> %pow_sign to <2 x double> |
  %pow_sign1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %ylogx, <2 x double> %cast_sign)
| ret <2 x double> %pow_sign1 |
| } |
| |
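; The sign operand is (%y.i << 31) bitcast to float, i.e. +0.0 or -0.0, so the
; shifted integer should be usable directly as the v_bfi_b32 sign input.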
| define float @copysign_f32_f32_sign_known_p0_or_n0(float %x, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
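; Same as above, but the +/-0.0 sign value is fpext'ed to f64. Only its sign
; bit is demanded, so no f32->f64 conversion should be emitted; the shifted i32
; feeds the v_bfi_b32 on the high dword of the result.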
| define double @copysign_f64_f32_sign_known_p0_or_n0(double %x, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f64_f32_sign_known_p0_or_n0: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v1, s4, v1, v2 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %y.even.as.f32.fpext = fpext float %y.even.as.f32 to double |
| %copysign = call double @llvm.copysign.f64(double %x, double %y.even.as.f32.fpext) |
| ret double %copysign |
| } |
| |
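; The +/-0.0 sign value is fptrunc'ed to f16. Only the sign bit is demanded, so
; the fptrunc should reduce to a shift with no v_cvt_f16_f32.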
| define half @copysign_f16_f32_sign_known_p0_or_n0(half %x, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f16_f32_sign_known_p0_or_n0: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1 |
| ; GFX9-NEXT: s_movk_i32 s4, 0x7fff |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %y.even.as.f32.fptrunc = fptrunc float %y.even.as.f32 to half |
| %copysign = call half @llvm.copysign.f16(half %x, half %y.even.as.f32.fptrunc) |
| ret half %copysign |
| } |
| |
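; The magnitude is fabs(%x.arg), but copysign never reads its magnitude
; operand's sign bit, so the fabs should also fold away and %x.arg can feed the
; v_bfi_b32 directly.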
| define float @copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_fabs(float %x.arg, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_fabs: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %x = call float @llvm.fabs.f32(float %x.arg) |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
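; Magnitude made non-negative with an fcmp/select instead of fabs; the select
; is kept and the sign is still inserted with v_bfi_b32.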
| define float @copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_select(float %x.arg, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_select: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %x.ule.0 = fcmp ule float %x.arg, 0.0 |
| %x = select i1 %x.ule.0, float 0.0, float %x.arg |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
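; Magnitude is an nnan nsz sqrt, which is known non-negative; the expanded f32
; sqrt sequence is followed by a v_bfi_b32 inserting the +/-0.0 sign.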
| define float @copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_nnan_nsz_sqrt(float %x.arg, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_nnan_nsz_sqrt: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: s_mov_b32 s4, 0xf800000 |
| ; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f800000, v0 |
| ; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc |
| ; GFX9-NEXT: v_sqrt_f32_e32 v2, v0 |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: v_add_u32_e32 v3, -1, v2 |
| ; GFX9-NEXT: v_fma_f32 v4, -v3, v2, v0 |
| ; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], 0, v4 |
| ; GFX9-NEXT: v_add_u32_e32 v4, 1, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[4:5] |
| ; GFX9-NEXT: v_fma_f32 v2, -v4, v2, v0 |
| ; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], 0, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v4, s[4:5] |
| ; GFX9-NEXT: v_mul_f32_e32 v3, 0x37800000, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc |
| ; GFX9-NEXT: v_mov_b32_e32 v3, 0x260 |
| ; GFX9-NEXT: v_cmp_class_f32_e32 vcc, v0, v3 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %x = call nnan nsz float @llvm.sqrt.f32(float %x.arg) |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
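; Same as above with only nsz on the sqrt: the result may still be a NaN with
; either sign, so the magnitude is not known non-negative.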
| define float @copysign_f32_f32_sign_known_p0_or_n0__mag_almost_positive_nsz_sqrt(float %x.arg, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_almost_positive_nsz_sqrt: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: s_mov_b32 s4, 0xf800000 |
| ; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f800000, v0 |
| ; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc |
| ; GFX9-NEXT: v_sqrt_f32_e32 v2, v0 |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: v_add_u32_e32 v3, -1, v2 |
| ; GFX9-NEXT: v_fma_f32 v4, -v3, v2, v0 |
| ; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], 0, v4 |
| ; GFX9-NEXT: v_add_u32_e32 v4, 1, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[4:5] |
| ; GFX9-NEXT: v_fma_f32 v2, -v4, v2, v0 |
| ; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], 0, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v4, s[4:5] |
| ; GFX9-NEXT: v_mul_f32_e32 v3, 0x37800000, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc |
| ; GFX9-NEXT: v_mov_b32_e32 v3, 0x260 |
| ; GFX9-NEXT: v_cmp_class_f32_e32 vcc, v0, v3 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %x = call nsz float @llvm.sqrt.f32(float %x.arg) |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
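; Same with only nnan on the sqrt: sqrt(-0.0) is -0.0, so the result is not
; known non-negative either.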
| define float @copysign_f32_f32_sign_known_p0_or_n0__mag_almost_positive_nnan_sqrt(float %x.arg, i32 %y.i) { |
| ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_almost_positive_nnan_sqrt: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: s_mov_b32 s4, 0xf800000 |
| ; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f800000, v0 |
| ; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc |
| ; GFX9-NEXT: v_sqrt_f32_e32 v2, v0 |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: v_add_u32_e32 v3, -1, v2 |
| ; GFX9-NEXT: v_fma_f32 v4, -v3, v2, v0 |
| ; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], 0, v4 |
| ; GFX9-NEXT: v_add_u32_e32 v4, 1, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[4:5] |
| ; GFX9-NEXT: v_fma_f32 v2, -v4, v2, v0 |
| ; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], 0, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v4, s[4:5] |
| ; GFX9-NEXT: v_mul_f32_e32 v3, 0x37800000, v2 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc |
| ; GFX9-NEXT: v_mov_b32_e32 v3, 0x260 |
| ; GFX9-NEXT: v_cmp_class_f32_e32 vcc, v0, v3 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc |
| ; GFX9-NEXT: s_brev_b32 s4, -2 |
| ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %x = call nnan float @llvm.sqrt.f32(float %x.arg) |
| %y.even = shl i32 %y.i, 31 |
| %y.even.as.f32 = bitcast i32 %y.even to float |
| %copysign = call float @llvm.copysign.f32(float %x, float %y.even.as.f32) |
| ret float %copysign |
| } |
| |
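; Fast powf expansion with an integral exponent: the sign operand is
; (y << 31) & x, so only x's sign bit is demanded, and the copysign of the
; known-non-negative exp2 result folds to a single v_and_or_b32.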
| define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) { |
| ; GFX9-LABEL: test_copysign_pow_fast_f32__integral_y: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: s_mov_b32 s4, 0x800000 |
| ; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v0|, s4 |
| ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 32, vcc |
| ; GFX9-NEXT: v_ldexp_f32 v3, |v0|, v3 |
| ; GFX9-NEXT: v_log_f32_e32 v3, v3 |
| ; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1 |
| ; GFX9-NEXT: v_mov_b32_e32 v2, 0x42000000 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc |
| ; GFX9-NEXT: v_sub_f32_e32 v2, v3, v2 |
| ; GFX9-NEXT: v_mul_f32_e32 v3, v2, v1 |
| ; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000 |
| ; GFX9-NEXT: v_mov_b32_e32 v4, 0x42800000 |
| ; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v3 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v4, vcc |
| ; GFX9-NEXT: v_fma_f32 v2, v2, v1, v3 |
| ; GFX9-NEXT: v_exp_f32_e32 v2, v2 |
| ; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1 |
| ; GFX9-NEXT: v_not_b32_e32 v3, 63 |
| ; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc |
| ; GFX9-NEXT: v_ldexp_f32 v2, v2, v3 |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 |
| ; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %y = sitofp i32 %y.i to float |
| %y.fptosi = fptosi float %y to i32 |
| %fabs = call fast float @llvm.fabs.f32(float %x) |
| %log2 = call fast float @llvm.log2.f32(float %fabs) |
| %pownI2F = sitofp i32 %y.i to float |
| %ylogx = fmul fast float %log2, %pownI2F |
| %exp2 = call fast float @llvm.exp2.f32(float %ylogx) |
| %yeven = shl i32 %y.fptosi, 31 |
| %x.i32 = bitcast float %x to i32 |
| %pow_sign = and i32 %yeven, %x.i32 |
| %pow_sign.f32 = bitcast i32 %pow_sign to float |
| %pow_sign1 = call fast float @llvm.copysign.f32(float %exp2, float %pow_sign.f32) |
| ret float %pow_sign1 |
| } |
| |
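; f64 version using external _Z4log2d/_Z4exp2d calls; the exp2 result is
; marked nofpclass(nan ninf nzero nsub nnorm), so its sign bit is known zero
; and the copysign becomes a plain and/or on the high dword of the result.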
| define double @test_pow_fast_f64integral_y(double %x, i32 %y.i) #0 { |
| ; GFX9-LABEL: test_pow_fast_f64integral_y: |
| ; GFX9: ; %bb.0: |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GFX9-NEXT: s_mov_b32 s16, s33 |
| ; GFX9-NEXT: s_mov_b32 s33, s32 |
| ; GFX9-NEXT: s_or_saveexec_b64 s[18:19], -1 |
| ; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill |
| ; GFX9-NEXT: s_mov_b64 exec, s[18:19] |
| ; GFX9-NEXT: v_writelane_b32 v43, s16, 14 |
| ; GFX9-NEXT: v_writelane_b32 v43, s30, 0 |
| ; GFX9-NEXT: v_writelane_b32 v43, s31, 1 |
| ; GFX9-NEXT: v_writelane_b32 v43, s34, 2 |
| ; GFX9-NEXT: v_writelane_b32 v43, s35, 3 |
| ; GFX9-NEXT: v_writelane_b32 v43, s36, 4 |
| ; GFX9-NEXT: v_writelane_b32 v43, s37, 5 |
| ; GFX9-NEXT: v_writelane_b32 v43, s38, 6 |
| ; GFX9-NEXT: v_writelane_b32 v43, s39, 7 |
| ; GFX9-NEXT: v_writelane_b32 v43, s48, 8 |
| ; GFX9-NEXT: v_writelane_b32 v43, s49, 9 |
| ; GFX9-NEXT: v_writelane_b32 v43, s50, 10 |
| ; GFX9-NEXT: s_addk_i32 s32, 0x800 |
| ; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill |
| ; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill |
| ; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill |
| ; GFX9-NEXT: v_writelane_b32 v43, s51, 11 |
| ; GFX9-NEXT: v_mov_b32_e32 v42, v1 |
| ; GFX9-NEXT: v_writelane_b32 v43, s52, 12 |
| ; GFX9-NEXT: v_and_b32_e32 v1, 0x7fffffff, v42 |
| ; GFX9-NEXT: s_getpc_b64 s[16:17] |
| ; GFX9-NEXT: s_add_u32 s16, s16, _Z4log2d@rel32@lo+4 |
| ; GFX9-NEXT: s_addc_u32 s17, s17, _Z4log2d@rel32@hi+12 |
| ; GFX9-NEXT: v_writelane_b32 v43, s53, 13 |
| ; GFX9-NEXT: v_mov_b32_e32 v40, v31 |
| ; GFX9-NEXT: v_mov_b32_e32 v41, v2 |
| ; GFX9-NEXT: s_mov_b32 s50, s15 |
| ; GFX9-NEXT: s_mov_b32 s51, s14 |
| ; GFX9-NEXT: s_mov_b32 s52, s13 |
| ; GFX9-NEXT: s_mov_b32 s53, s12 |
| ; GFX9-NEXT: s_mov_b64 s[34:35], s[10:11] |
| ; GFX9-NEXT: s_mov_b64 s[36:37], s[8:9] |
| ; GFX9-NEXT: s_mov_b64 s[38:39], s[6:7] |
| ; GFX9-NEXT: s_mov_b64 s[48:49], s[4:5] |
| ; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17] |
| ; GFX9-NEXT: v_cvt_f64_i32_e32 v[2:3], v41 |
| ; GFX9-NEXT: s_getpc_b64 s[16:17] |
| ; GFX9-NEXT: s_add_u32 s16, s16, _Z4exp2d@rel32@lo+4 |
| ; GFX9-NEXT: s_addc_u32 s17, s17, _Z4exp2d@rel32@hi+12 |
| ; GFX9-NEXT: s_mov_b64 s[4:5], s[48:49] |
| ; GFX9-NEXT: s_mov_b64 s[6:7], s[38:39] |
| ; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] |
| ; GFX9-NEXT: s_mov_b64 s[8:9], s[36:37] |
| ; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35] |
| ; GFX9-NEXT: s_mov_b32 s12, s53 |
| ; GFX9-NEXT: s_mov_b32 s13, s52 |
| ; GFX9-NEXT: s_mov_b32 s14, s51 |
| ; GFX9-NEXT: s_mov_b32 s15, s50 |
| ; GFX9-NEXT: v_mov_b32_e32 v31, v40 |
| ; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17] |
| ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v41 |
| ; GFX9-NEXT: v_and_b32_e32 v2, v2, v42 |
| ; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload |
| ; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload |
| ; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload |
| ; GFX9-NEXT: v_or_b32_e32 v1, v1, v2 |
| ; GFX9-NEXT: v_readlane_b32 s53, v43, 13 |
| ; GFX9-NEXT: v_readlane_b32 s52, v43, 12 |
| ; GFX9-NEXT: v_readlane_b32 s51, v43, 11 |
| ; GFX9-NEXT: v_readlane_b32 s50, v43, 10 |
| ; GFX9-NEXT: v_readlane_b32 s49, v43, 9 |
| ; GFX9-NEXT: v_readlane_b32 s48, v43, 8 |
| ; GFX9-NEXT: v_readlane_b32 s39, v43, 7 |
| ; GFX9-NEXT: v_readlane_b32 s38, v43, 6 |
| ; GFX9-NEXT: v_readlane_b32 s37, v43, 5 |
| ; GFX9-NEXT: v_readlane_b32 s36, v43, 4 |
| ; GFX9-NEXT: v_readlane_b32 s35, v43, 3 |
| ; GFX9-NEXT: v_readlane_b32 s34, v43, 2 |
| ; GFX9-NEXT: v_readlane_b32 s31, v43, 1 |
| ; GFX9-NEXT: v_readlane_b32 s30, v43, 0 |
| ; GFX9-NEXT: s_mov_b32 s32, s33 |
| ; GFX9-NEXT: v_readlane_b32 s4, v43, 14 |
| ; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1 |
| ; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload |
| ; GFX9-NEXT: s_mov_b64 exec, s[6:7] |
| ; GFX9-NEXT: s_mov_b32 s33, s4 |
| ; GFX9-NEXT: s_waitcnt vmcnt(0) |
| ; GFX9-NEXT: s_setpc_b64 s[30:31] |
| %fabs = call fast double @llvm.fabs.f64(double %x) |
| %log2 = call fast double @_Z4log2d(double %fabs) |
| %pownI2F = sitofp i32 %y.i to double |
| %ylogx = fmul fast double %log2, %pownI2F |
| %exp2 = call fast nofpclass(nan ninf nzero nsub nnorm) double @_Z4exp2d(double %ylogx) |
| %ytou = zext i32 %y.i to i64 |
| %yeven = shl i64 %ytou, 63 |
| %x.i64 = bitcast double %x to i64 |
| %pow_sign = and i64 %yeven, %x.i64 |
| %pow_sign.f64 = bitcast i64 %pow_sign to double |
| %pow_sign1 = call fast double @llvm.copysign.f64(double %exp2, double %pow_sign.f64) |
| ret double %pow_sign1 |
| } |
| |
| declare hidden double @_Z4exp2d(double) #1 |
| declare hidden double @_Z4log2d(double) #1 |
| |
attributes #0 = { nounwind }
| attributes #1 = { norecurse nounwind memory(read) } |