; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64IF
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64IZFINX
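
; f32 <-> i128 conversions have no native RV64 instructions, so all of them
; are lowered to compiler-rt libcalls. The three RUN configurations cover
; soft-float RV64I, hard-float RV64IF (lp64f ABI), and RV64IZFINX (floats
; held in integer registers).

; A plain fptosi to i128 is a single call to __fixsfti; only the return
; address needs to be saved around the call.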
define i128 @fptosi_f32_to_i128(float %a) nounwind {
; CHECK-LABEL: fptosi_f32_to_i128:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: call __fixsfti
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = fptosi float %a to i128
ret i128 %1
}
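
; The unsigned conversion lowers the same way, via __fixunssfti.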
define i128 @fptoui_f32_to_i128(float %a) nounwind {
; CHECK-LABEL: fptoui_f32_to_i128:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: call __fixunssfti
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = fptoui float %a to i128
ret i128 %1
}
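
; The int-to-float direction likewise becomes a libcall: __floattisf for
; signed i128 input.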
define float @sitofp_i128_to_f32(i128 %a) nounwind {
; CHECK-LABEL: sitofp_i128_to_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: call __floattisf
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = sitofp i128 %a to float
ret float %1
}
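
; ... and __floatuntisf for unsigned i128 input.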
define float @uitofp_i128_to_f32(i128 %a) nounwind {
; CHECK-LABEL: uitofp_i128_to_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: call __floatuntisf
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = uitofp i128 %a to float
ret float %1
}
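
; llvm.fptosi.sat additionally clamps out-of-range inputs to INT128_MIN/MAX
; and returns 0 for NaN. RV64I materializes the bounds as integer bit
; patterns (0xFF000000 is -2^127; 0x7EFFFFFF is the largest float below
; 2^127) and compares through the __gesf2/__gtsf2/__unordsf2 libcalls.
; RV64IF keeps the input in fs0, loads the upper bound from the constant
; pool, and uses fle.s/flt.s, with feq.s fs0, fs0 as the NaN check. Zfinx
; does the same with the float in an x-register, so both bounds come from
; lui/addiw and no constant pool is needed.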
define i128 @fptosi_sat_f32_to_i128(float %a) nounwind {
; RV64I-LABEL: fptosi_sat_f32_to_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 1044480
; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __fixsfti
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv s3, a1
; RV64I-NEXT: li s5, -1
; RV64I-NEXT: bgez s2, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slli s3, s5, 63
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: lui a1, 520192
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: blez a0, .LBB4_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: srli s3, s5, 1
; RV64I-NEXT: .LBB4_4:
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: slti a1, s2, 0
; RV64I-NEXT: sgtz a2, s4
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: addi a3, a1, -1
; RV64I-NEXT: and a1, a0, s3
; RV64I-NEXT: and a3, a3, s0
; RV64I-NEXT: neg a2, a2
; RV64I-NEXT: or a2, a2, a3
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV64IF-LABEL: fptosi_sat_f32_to_i128:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -32
; RV64IF-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fmv.s fs0, fa0
; RV64IF-NEXT: lui a0, 1044480
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fle.s s0, fa5, fa0
; RV64IF-NEXT: call __fixsfti
; RV64IF-NEXT: li a3, -1
; RV64IF-NEXT: bnez s0, .LBB4_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: slli a1, a3, 63
; RV64IF-NEXT: .LBB4_2:
; RV64IF-NEXT: lui a2, %hi(.LCPI4_0)
; RV64IF-NEXT: flw fa5, %lo(.LCPI4_0)(a2)
; RV64IF-NEXT: flt.s a2, fa5, fs0
; RV64IF-NEXT: beqz a2, .LBB4_4
; RV64IF-NEXT: # %bb.3:
; RV64IF-NEXT: srli a1, a3, 1
; RV64IF-NEXT: .LBB4_4:
; RV64IF-NEXT: feq.s a3, fs0, fs0
; RV64IF-NEXT: neg a4, s0
; RV64IF-NEXT: neg a2, a2
; RV64IF-NEXT: neg a3, a3
; RV64IF-NEXT: and a0, a4, a0
; RV64IF-NEXT: and a1, a3, a1
; RV64IF-NEXT: or a0, a2, a0
; RV64IF-NEXT: and a0, a3, a0
; RV64IF-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IF-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 32
; RV64IF-NEXT: ret
;
; RV64IZFINX-LABEL: fptosi_sat_f32_to_i128:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -32
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: lui a0, 1044480
; RV64IZFINX-NEXT: fle.s s1, a0, s0
; RV64IZFINX-NEXT: mv a0, s0
; RV64IZFINX-NEXT: call __fixsfti
; RV64IZFINX-NEXT: li a2, -1
; RV64IZFINX-NEXT: bnez s1, .LBB4_2
; RV64IZFINX-NEXT: # %bb.1:
; RV64IZFINX-NEXT: slli a1, a2, 63
; RV64IZFINX-NEXT: .LBB4_2:
; RV64IZFINX-NEXT: lui a3, 520192
; RV64IZFINX-NEXT: addiw a3, a3, -1
; RV64IZFINX-NEXT: flt.s a3, a3, s0
; RV64IZFINX-NEXT: beqz a3, .LBB4_4
; RV64IZFINX-NEXT: # %bb.3:
; RV64IZFINX-NEXT: srli a1, a2, 1
; RV64IZFINX-NEXT: .LBB4_4:
; RV64IZFINX-NEXT: feq.s a2, s0, s0
; RV64IZFINX-NEXT: neg a4, s1
; RV64IZFINX-NEXT: neg a3, a3
; RV64IZFINX-NEXT: neg a2, a2
; RV64IZFINX-NEXT: and a0, a4, a0
; RV64IZFINX-NEXT: and a1, a2, a1
; RV64IZFINX-NEXT: or a0, a3, a0
; RV64IZFINX-NEXT: and a0, a2, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 32
; RV64IZFINX-NEXT: ret
%1 = tail call i128 @llvm.fptosi.sat.i128.f32(float %a)
ret i128 %1
}

declare i128 @llvm.fptosi.sat.i128.f32(float)
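
; llvm.fptoui.sat clamps to the range [0, UINT128_MAX] and returns 0 for
; NaN. Both range checks (against 0.0 and against the largest finite f32,
; bit pattern 0x7F7FFFFF) are turned into all-zero/all-one masks that
; select between 0, the __fixunssfti result, and UINT128_MAX.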
define i128 @fptoui_sat_f32_to_i128(float %a) nounwind {
; RV64I-LABEL: fptoui_sat_f32_to_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 522240
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s2, a0, -1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixunssfti
; RV64I-NEXT: and a0, s2, a0
; RV64I-NEXT: and a1, s2, a1
; RV64I-NEXT: or a0, s1, a0
; RV64I-NEXT: or a1, s1, a1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
; RV64IF-LABEL: fptoui_sat_f32_to_i128:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -32
; RV64IF-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fmv.s fs0, fa0
; RV64IF-NEXT: fmv.w.x fa5, zero
; RV64IF-NEXT: fle.s a0, fa5, fa0
; RV64IF-NEXT: neg s0, a0
; RV64IF-NEXT: call __fixunssfti
; RV64IF-NEXT: lui a2, %hi(.LCPI5_0)
; RV64IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV64IF-NEXT: and a0, s0, a0
; RV64IF-NEXT: and a1, s0, a1
; RV64IF-NEXT: flt.s a2, fa5, fs0
; RV64IF-NEXT: neg a2, a2
; RV64IF-NEXT: or a0, a2, a0
; RV64IF-NEXT: or a1, a2, a1
; RV64IF-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IF-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 32
; RV64IF-NEXT: ret
;
; RV64IZFINX-LABEL: fptoui_sat_f32_to_i128:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -32
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: fle.s a0, zero, a0
; RV64IZFINX-NEXT: neg s1, a0
; RV64IZFINX-NEXT: mv a0, s0
; RV64IZFINX-NEXT: call __fixunssfti
; RV64IZFINX-NEXT: and a0, s1, a0
; RV64IZFINX-NEXT: lui a2, 522240
; RV64IZFINX-NEXT: and a1, s1, a1
; RV64IZFINX-NEXT: addiw a2, a2, -1
; RV64IZFINX-NEXT: flt.s a2, a2, s0
; RV64IZFINX-NEXT: neg a2, a2
; RV64IZFINX-NEXT: or a0, a2, a0
; RV64IZFINX-NEXT: or a1, a2, a1
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 32
; RV64IZFINX-NEXT: ret
%1 = tail call i128 @llvm.fptoui.sat.i128.f32(float %a)
ret i128 %1
}

declare i128 @llvm.fptoui.sat.i128.f32(float)