| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation -target-abi=ilp32d \ |
| ; RUN: | FileCheck -check-prefix=CHECKIFD %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation -target-abi=lp64d \ |
| ; RUN: | FileCheck -check-prefix=CHECKIFD %s |
| ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation -target-abi=ilp32 \ |
| ; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation -target-abi=lp64 \ |
| ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s |
| ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s |
| ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ |
| ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s |
| |
; Quiet "oeq": lowers to a single feq.d on hard-float/Zdinx targets (no flag
; save/restore emitted); soft-float RV32I/RV64I call __eqdf2 and seqz the result.
define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oeq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oeq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) |
| |
; Quiet "ogt": hard-float targets emit flt.d with swapped operands, wrapped in
; frflags/fsflags, followed by a discarded feq.d (result to zero); soft-float
; calls __gtdf2 and tests for a positive return.
define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ogt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: flt.d a0, fa1, fa0
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a5
; RV32IZFINXZDINX-NEXT: flt.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ogt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a3
; RV64IZFINXZDINX-NEXT: flt.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "oge": same pattern as ogt but with fle.d (swapped operands) under
; frflags/fsflags plus a discarded feq.d; soft-float calls __gedf2 and
; computes (result >= 0) via slti+xori.
define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a5
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a3
; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "olt": flt.d in natural operand order under frflags/fsflags plus a
; discarded feq.d; soft-float calls __ltdf2 and computes (result < 0) via slti.
define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_olt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a5
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_olt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a3
; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "ole": fle.d in natural operand order under frflags/fsflags plus a
; discarded feq.d; soft-float calls __ledf2 and computes (result <= 0) via
; slti against 1.
define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ole:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: fle.d a0, fa0, fa1
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a5
; RV32IZFINXZDINX-NEXT: fle.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ole:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a3
; RV64IZFINXZDINX-NEXT: fle.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
| ; FIXME: We only need one frflags before the two flts and one fsflags after the |
| ; two flts. |
; Quiet "one" (ordered and unequal): lowered as two flt.d compares (each with
; its own frflags/fsflags pair and discarded feq.d — see FIXME above) OR-ed
; together; soft-float builds it from __eqdf2 != 0 AND __unorddf2 == 0,
; spilling the four (RV32) / two (RV64) argument registers across the calls.
define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_one:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a2, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: or a0, a2, a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: or a4, a6, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_one:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: or a2, a4, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_one:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "ord" (neither operand NaN): lowered as feq.d of each operand with
; itself, AND-ed — no flag save/restore emitted; soft-float calls __unorddf2
; and inverts with seqz.
define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ord:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa1, fa1
; CHECKIFD-NEXT: feq.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ord:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
| ; FIXME: We only need one frflags before the two flts and one fsflags after the |
| ; two flts. |
; Quiet "ueq" (unordered or equal): the inverse of "one" — two flt.d compares
; OR-ed then XOR-ed with 1 (flag save/restore duplicated per compare, see
; FIXME above); soft-float computes __eqdf2 == 0 OR __unorddf2 != 0.
define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ueq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a2, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: or a1, a2, a1
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: or a4, a6, a5
; RV32IZFINXZDINX-NEXT: xori a4, a4, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ueq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: or a3, a4, a3
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "ugt" (unordered or greater): lowered as NOT(ole) — fle.d under
; frflags/fsflags, inverted with xori, plus a discarded feq.d; soft-float
; calls __ledf2 and tests for a positive return.
define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ugt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: fle.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: fle.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ugt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: fle.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "uge" (unordered or greater-or-equal): lowered as NOT(olt) — flt.d
; under frflags/fsflags, inverted with xori, plus a discarded feq.d;
; soft-float calls __ltdf2 and computes NOT(result < 0) via slti+xori.
define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "ult" (unordered or less): lowered as NOT(oge) — fle.d with swapped
; operands under frflags/fsflags, inverted with xori, plus a discarded feq.d;
; soft-float calls __gedf2 and computes (result < 0) via slti.
define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ult:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: fle.d a1, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: fle.d a5, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ult:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: fle.d a3, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "ule" (unordered or less-or-equal): lowered as NOT(ogt) — flt.d with
; swapped operands under frflags/fsflags, inverted with xori, plus a discarded
; feq.d; soft-float calls __gtdf2 and computes (result <= 0) via slti against 1.
define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ule:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: frflags a4
; RV32IZFINXZDINX-NEXT: flt.d a5, a2, a0
; RV32IZFINXZDINX-NEXT: fsflags a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ule:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: frflags a2
; RV64IZFINXZDINX-NEXT: flt.d a3, a1, a0
; RV64IZFINXZDINX-NEXT: fsflags a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "une" (unordered or not-equal): lowered as NOT(oeq) — feq.d then
; xori, no flag save/restore emitted; soft-float calls __nedf2 and snez.
define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_une:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa0, fa1
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_une:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_une:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Quiet "uno" (at least one operand NaN): lowered as NOT(ord) — two
; self-compares via feq.d, AND-ed, then inverted with xori; soft-float calls
; __unorddf2 and snez.
define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uno:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa1, fa1
; CHECKIFD-NEXT: feq.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uno:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
; Signaling (fcmps) "oeq": unlike the quiet form, lowered as two fle.d
; compares AND-ed together rather than a single feq.d, with no flag
; save/restore; soft-float lowering is the same __eqdf2 call as fcmp_oeq.
define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_oeq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: fle.d a1, fa0, fa1
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_oeq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oeq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: and a0, a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_oeq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) |
| |
; Signaling (fcmps) "ogt": flt.d with swapped operands, emitted bare — no
; frflags/fsflags wrapper and no extra feq.d, unlike the quiet fcmp_ogt;
; soft-float calls __gtdf2 and tests for a positive return.
define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ogt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ogt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ogt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ogt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
%2 = zext i1 %1 to i32
ret i32 %2
}
| |
| ; Signaling 'oge' compare: hardware-FP targets use fle.d with operands |
| ; swapped (b <= a); soft-float targets call __gedf2 and compute |
| ; (result >= 0) via slti + xori. |
| define i32 @fcmps_oge(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_oge: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa1, fa0 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_oge: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_oge: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_oge: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __gedf2 |
| ; RV32I-NEXT: slti a0, a0, 0 |
| ; RV32I-NEXT: xori a0, a0, 1 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_oge: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __gedf2 |
| ; RV64I-NEXT: slti a0, a0, 0 |
| ; RV64I-NEXT: xori a0, a0, 1 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'olt' compare: hardware-FP targets lower directly to flt.d; |
| ; soft-float targets call __ltdf2 and test (result < 0) with slti. |
| define i32 @fcmps_olt(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_olt: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_olt: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_olt: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_olt: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __ltdf2 |
| ; RV32I-NEXT: slti a0, a0, 0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_olt: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __ltdf2 |
| ; RV64I-NEXT: slti a0, a0, 0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ole' compare: hardware-FP targets lower directly to fle.d; |
| ; soft-float targets call __ledf2 and test (result <= 0), encoded as |
| ; slti a0, a0, 1. |
| define i32 @fcmps_ole(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ole: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ole: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ole: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ole: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __ledf2 |
| ; RV32I-NEXT: slti a0, a0, 1 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ole: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __ledf2 |
| ; RV64I-NEXT: slti a0, a0, 1 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'one' (ordered and not-equal) compare: hardware-FP targets |
| ; compute (a < b) OR (b < a) with two flt.d instructions; soft-float |
| ; targets combine (__eqdf2 != 0) AND (__unorddf2 == 0), spilling the |
| ; argument registers across the two libcalls. |
| define i32 @fcmps_one(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_one: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: flt.d a1, fa1, fa0 |
| ; CHECKIFD-NEXT: or a0, a1, a0 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_one: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: or a0, a0, a4 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_one: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: or a0, a0, a2 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_one: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: mv s0, a3 |
| ; RV32I-NEXT: mv s1, a2 |
| ; RV32I-NEXT: mv s2, a1 |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: call __eqdf2 |
| ; RV32I-NEXT: snez s4, a0 |
| ; RV32I-NEXT: mv a0, s3 |
| ; RV32I-NEXT: mv a1, s2 |
| ; RV32I-NEXT: mv a2, s1 |
| ; RV32I-NEXT: mv a3, s0 |
| ; RV32I-NEXT: call __unorddf2 |
| ; RV32I-NEXT: seqz a0, a0 |
| ; RV32I-NEXT: and a0, a0, s4 |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_one: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -32 |
| ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: mv s0, a1 |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: call __eqdf2 |
| ; RV64I-NEXT: snez s2, a0 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: mv a1, s0 |
| ; RV64I-NEXT: call __unorddf2 |
| ; RV64I-NEXT: seqz a0, a0 |
| ; RV64I-NEXT: and a0, a0, s2 |
| ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 32 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ord' compare: hardware-FP targets test each operand against |
| ; itself with fle.d (rather than the quiet feq.d used by the non-signaling |
| ; fcmp tests) and AND the results; soft-float targets test |
| ; (__unorddf2 == 0). |
| define i32 @fcmps_ord(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ord: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa1, fa1 |
| ; CHECKIFD-NEXT: fle.d a1, fa0, fa0 |
| ; CHECKIFD-NEXT: and a0, a1, a0 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ord: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2 |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0 |
| ; RV32IZFINXZDINX-NEXT: and a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ord: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1 |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a0 |
| ; RV64IZFINXZDINX-NEXT: and a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ord: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __unorddf2 |
| ; RV32I-NEXT: seqz a0, a0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ord: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __unorddf2 |
| ; RV64I-NEXT: seqz a0, a0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ueq' (unordered or equal) compare: hardware-FP targets compute |
| ; NOT((a < b) OR (b < a)) with two flt.d plus or/xori; soft-float targets |
| ; combine (__eqdf2 == 0) OR (__unorddf2 != 0), spilling arguments across |
| ; the two libcalls. |
| define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ueq: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: flt.d a1, fa1, fa0 |
| ; CHECKIFD-NEXT: or a0, a1, a0 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ueq: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: or a0, a0, a4 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ueq: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: or a0, a0, a2 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ueq: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: mv s0, a3 |
| ; RV32I-NEXT: mv s1, a2 |
| ; RV32I-NEXT: mv s2, a1 |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: call __eqdf2 |
| ; RV32I-NEXT: seqz s4, a0 |
| ; RV32I-NEXT: mv a0, s3 |
| ; RV32I-NEXT: mv a1, s2 |
| ; RV32I-NEXT: mv a2, s1 |
| ; RV32I-NEXT: mv a3, s0 |
| ; RV32I-NEXT: call __unorddf2 |
| ; RV32I-NEXT: snez a0, a0 |
| ; RV32I-NEXT: or a0, a0, s4 |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ueq: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -32 |
| ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: mv s0, a1 |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: call __eqdf2 |
| ; RV64I-NEXT: seqz s2, a0 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: mv a1, s0 |
| ; RV64I-NEXT: call __unorddf2 |
| ; RV64I-NEXT: snez a0, a0 |
| ; RV64I-NEXT: or a0, a0, s2 |
| ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 32 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ugt' (unordered or greater-than) compare: hardware-FP targets |
| ; compute NOT(a <= b) as fle.d + xori; soft-float targets call __ledf2 and |
| ; test (result > 0) with sgtz. |
| define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ugt: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ugt: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ugt: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ugt: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __ledf2 |
| ; RV32I-NEXT: sgtz a0, a0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ugt: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __ledf2 |
| ; RV64I-NEXT: sgtz a0, a0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'uge' (unordered or greater-or-equal) compare: hardware-FP |
| ; targets compute NOT(a < b) as flt.d + xori; soft-float targets call |
| ; __ltdf2 and compute (result >= 0) via slti + xori. |
| define i32 @fcmps_uge(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_uge: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_uge: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_uge: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_uge: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __ltdf2 |
| ; RV32I-NEXT: slti a0, a0, 0 |
| ; RV32I-NEXT: xori a0, a0, 1 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_uge: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __ltdf2 |
| ; RV64I-NEXT: slti a0, a0, 0 |
| ; RV64I-NEXT: xori a0, a0, 1 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ult' (unordered or less-than) compare: hardware-FP targets |
| ; compute NOT(b <= a) as fle.d (operands swapped) + xori; soft-float |
| ; targets call __gedf2 and test (result < 0) with slti. |
| define i32 @fcmps_ult(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ult: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa1, fa0 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ult: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ult: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ult: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __gedf2 |
| ; RV32I-NEXT: slti a0, a0, 0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ult: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __gedf2 |
| ; RV64I-NEXT: slti a0, a0, 0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'ule' (unordered or less-or-equal) compare: hardware-FP targets |
| ; compute NOT(b < a) as flt.d (operands swapped) + xori; soft-float targets |
| ; call __gtdf2 and test (result <= 0), encoded as slti a0, a0, 1. |
| define i32 @fcmps_ule(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_ule: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: flt.d a0, fa1, fa0 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_ule: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_ule: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_ule: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __gtdf2 |
| ; RV32I-NEXT: slti a0, a0, 1 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_ule: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __gtdf2 |
| ; RV64I-NEXT: slti a0, a0, 1 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'une' (unordered or not-equal) compare: hardware-FP targets |
| ; compute NOT((b <= a) AND (a <= b)) with two fle.d plus and/xori — fle.d |
| ; rather than the quiet feq.d, matching the signaling intrinsic; soft-float |
| ; targets test (__nedf2 != 0). |
| define i32 @fcmps_une(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_une: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa1, fa0 |
| ; CHECKIFD-NEXT: fle.d a1, fa0, fa1 |
| ; CHECKIFD-NEXT: and a0, a1, a0 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_une: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0 |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: and a0, a0, a4 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_une: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0 |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: and a0, a0, a2 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_une: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __nedf2 |
| ; RV32I-NEXT: snez a0, a0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_une: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __nedf2 |
| ; RV64I-NEXT: snez a0, a0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |
| |
| ; Signaling 'uno' (unordered) compare: hardware-FP targets invert the 'ord' |
| ; pattern — each operand fle.d'd against itself, ANDed, then xori — so NaN |
| ; operands are detected; soft-float targets test (__unorddf2 != 0). |
| define i32 @fcmps_uno(double %a, double %b) nounwind strictfp { |
| ; CHECKIFD-LABEL: fcmps_uno: |
| ; CHECKIFD: # %bb.0: |
| ; CHECKIFD-NEXT: fle.d a0, fa1, fa1 |
| ; CHECKIFD-NEXT: fle.d a1, fa0, fa0 |
| ; CHECKIFD-NEXT: and a0, a1, a0 |
| ; CHECKIFD-NEXT: xori a0, a0, 1 |
| ; CHECKIFD-NEXT: ret |
| ; |
| ; RV32IZFINXZDINX-LABEL: fcmps_uno: |
| ; RV32IZFINXZDINX: # %bb.0: |
| ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2 |
| ; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0 |
| ; RV32IZFINXZDINX-NEXT: and a0, a0, a2 |
| ; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV32IZFINXZDINX-NEXT: ret |
| ; |
| ; RV64IZFINXZDINX-LABEL: fcmps_uno: |
| ; RV64IZFINXZDINX: # %bb.0: |
| ; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1 |
| ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a0 |
| ; RV64IZFINXZDINX-NEXT: and a0, a0, a1 |
| ; RV64IZFINXZDINX-NEXT: xori a0, a0, 1 |
| ; RV64IZFINXZDINX-NEXT: ret |
| ; |
| ; RV32I-LABEL: fcmps_uno: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -16 |
| ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: call __unorddf2 |
| ; RV32I-NEXT: snez a0, a0 |
| ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 16 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: fcmps_uno: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -16 |
| ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: call __unorddf2 |
| ; RV64I-NEXT: snez a0, a0 |
| ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 16 |
| ; RV64I-NEXT: ret |
| %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp |
| %2 = zext i1 %1 to i32 |
| ret i32 %2 |
| } |