| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV32,RV32IF %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV64,RV64IF %s |
| ; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV32,RV32IFD %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefixes=RV64,RV64IFD %s |
| |
| ; i32 saturate |
| |
; stest_f64i32: signed saturation. fptosi double -> i64, then clamp to the
; signed i32 range [-2147483648, 2147483647] via two icmp+select pairs and
; truncate. CHECK lines are autogenerated - do not edit by hand.
define i32 @stest_f64i32(double %x) {
; RV32-LABEL: stest_f64i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixdfdi@plt
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB0_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a1, 0
; RV32-NEXT: beqz a4, .LBB0_3
; RV32-NEXT: j .LBB0_4
; RV32-NEXT: .LBB0_2:
; RV32-NEXT: sltu a4, a0, a3
; RV32-NEXT: bnez a4, .LBB0_4
; RV32-NEXT: .LBB0_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: mv a0, a3
; RV32-NEXT: .LBB0_4: # %entry
; RV32-NEXT: li a3, -1
; RV32-NEXT: beq a1, a3, .LBB0_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: slt a1, a3, a1
; RV32-NEXT: beqz a1, .LBB0_7
; RV32-NEXT: j .LBB0_8
; RV32-NEXT: .LBB0_6:
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: bnez a1, .LBB0_8
; RV32-NEXT: .LBB0_7: # %entry
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: .LBB0_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64IF-LABEL: stest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi@plt
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: addiw a2, a1, -1
; RV64IF-NEXT: blt a0, a2, .LBB0_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a2
; RV64IF-NEXT: .LBB0_2: # %entry
; RV64IF-NEXT: blt a1, a0, .LBB0_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 524288
; RV64IF-NEXT: .LBB0_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
; RV64IFD-NEXT: lui a1, 524288
; RV64IFD-NEXT: addiw a2, a1, -1
; RV64IFD-NEXT: bge a0, a2, .LBB0_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: bge a1, a0, .LBB0_4
; RV64IFD-NEXT: .LBB0_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB0_3: # %entry
; RV64IFD-NEXT: mv a0, a2
; RV64IFD-NEXT: blt a1, a0, .LBB0_2
; RV64IFD-NEXT: .LBB0_4: # %entry
; RV64IFD-NEXT: lui a0, 524288
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
; utest_f64i32: unsigned saturation. fptoui double -> i64, clamp the result
; to at most 4294967295 (UINT32_MAX) with a single icmp ult + select, then
; truncate. CHECK lines are autogenerated - do not edit by hand.
define i32 @utest_f64i32(double %x) {
; RV32-LABEL: utest_f64i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixunsdfdi@plt
; RV32-NEXT: beqz a1, .LBB1_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: beqz a1, .LBB1_3
; RV32-NEXT: j .LBB1_4
; RV32-NEXT: .LBB1_2:
; RV32-NEXT: addi a1, a0, 1
; RV32-NEXT: snez a1, a1
; RV32-NEXT: bnez a1, .LBB1_4
; RV32-NEXT: .LBB1_3: # %entry
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB1_4: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64IF-LABEL: utest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfdi@plt
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: bltu a0, a1, .LBB1_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB1_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: bltu a0, a1, .LBB1_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB1_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}
| |
; ustest_f64i32: signed-input unsigned saturation. fptosi double -> i64,
; clamp to [0, 4294967295] via two icmp+select pairs (upper bound slt, lower
; bound sgt 0), then truncate. CHECK lines are autogenerated.
define i32 @ustest_f64i32(double %x) {
; RV32-LABEL: ustest_f64i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixdfdi@plt
; RV32-NEXT: beqz a1, .LBB2_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
; RV32-NEXT: beqz a2, .LBB2_3
; RV32-NEXT: j .LBB2_4
; RV32-NEXT: .LBB2_2:
; RV32-NEXT: addi a2, a0, 1
; RV32-NEXT: snez a2, a2
; RV32-NEXT: bnez a2, .LBB2_4
; RV32-NEXT: .LBB2_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB2_4: # %entry
; RV32-NEXT: beqz a1, .LBB2_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: sgtz a1, a1
; RV32-NEXT: beqz a1, .LBB2_7
; RV32-NEXT: j .LBB2_8
; RV32-NEXT: .LBB2_6:
; RV32-NEXT: snez a1, a0
; RV32-NEXT: bnez a1, .LBB2_8
; RV32-NEXT: .LBB2_7: # %entry
; RV32-NEXT: li a0, 0
; RV32-NEXT: .LBB2_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi@plt
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: blt a0, a1, .LBB2_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB2_2: # %entry
; RV64IF-NEXT: bgtz a0, .LBB2_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: li a0, 0
; RV64IF-NEXT: .LBB2_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: bge a0, a1, .LBB2_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: blez a0, .LBB2_4
; RV64IFD-NEXT: .LBB2_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB2_3: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: bgtz a0, .LBB2_2
; RV64IFD-NEXT: .LBB2_4: # %entry
; RV64IFD-NEXT: li a0, 0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
; stest_f32i32: float variant of stest_f64i32. fptosi float -> i64, clamp to
; the signed i32 range [-2147483648, 2147483647], then truncate.
; CHECK lines are autogenerated - do not edit by hand.
define i32 @stest_f32i32(float %x) {
; RV32-LABEL: stest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixsfdi@plt
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB3_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a1, 0
; RV32-NEXT: beqz a4, .LBB3_3
; RV32-NEXT: j .LBB3_4
; RV32-NEXT: .LBB3_2:
; RV32-NEXT: sltu a4, a0, a3
; RV32-NEXT: bnez a4, .LBB3_4
; RV32-NEXT: .LBB3_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: mv a0, a3
; RV32-NEXT: .LBB3_4: # %entry
; RV32-NEXT: li a3, -1
; RV32-NEXT: beq a1, a3, .LBB3_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: slt a1, a3, a1
; RV32-NEXT: beqz a1, .LBB3_7
; RV32-NEXT: j .LBB3_8
; RV32-NEXT: .LBB3_6:
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: bnez a1, .LBB3_8
; RV32-NEXT: .LBB3_7: # %entry
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: .LBB3_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: bge a0, a2, .LBB3_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: bge a1, a0, .LBB3_4
; RV64-NEXT: .LBB3_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB3_3: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: blt a1, a0, .LBB3_2
; RV64-NEXT: .LBB3_4: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
; utest_f32i32: float variant of utest_f64i32. fptoui float -> i64, clamp to
; at most 4294967295 (UINT32_MAX), then truncate. CHECK lines autogenerated.
define i32 @utest_f32i32(float %x) {
; RV32-LABEL: utest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixunssfdi@plt
; RV32-NEXT: beqz a1, .LBB4_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: beqz a1, .LBB4_3
; RV32-NEXT: j .LBB4_4
; RV32-NEXT: .LBB4_2:
; RV32-NEXT: addi a1, a0, 1
; RV32-NEXT: snez a1, a1
; RV32-NEXT: bnez a1, .LBB4_4
; RV32-NEXT: .LBB4_3: # %entry
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB4_4: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.lu.s a0, ft0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB4_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB4_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}
| |
; ustest_f32i32: float variant of ustest_f64i32. fptosi float -> i64, clamp
; to [0, 4294967295] via two icmp+select pairs, then truncate.
; CHECK lines are autogenerated - do not edit by hand.
define i32 @ustest_f32i32(float %x) {
; RV32-LABEL: ustest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __fixsfdi@plt
; RV32-NEXT: beqz a1, .LBB5_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
; RV32-NEXT: beqz a2, .LBB5_3
; RV32-NEXT: j .LBB5_4
; RV32-NEXT: .LBB5_2:
; RV32-NEXT: addi a2, a0, 1
; RV32-NEXT: snez a2, a2
; RV32-NEXT: bnez a2, .LBB5_4
; RV32-NEXT: .LBB5_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB5_4: # %entry
; RV32-NEXT: beqz a1, .LBB5_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: sgtz a1, a1
; RV32-NEXT: beqz a1, .LBB5_7
; RV32-NEXT: j .LBB5_8
; RV32-NEXT: .LBB5_6:
; RV32-NEXT: snez a1, a0
; RV32-NEXT: bnez a1, .LBB5_8
; RV32-NEXT: .LBB5_7: # %entry
; RV32-NEXT: li a0, 0
; RV32-NEXT: .LBB5_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bge a0, a1, .LBB5_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: blez a0, .LBB5_4
; RV64-NEXT: .LBB5_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB5_3: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: bgtz a0, .LBB5_2
; RV64-NEXT: .LBB5_4: # %entry
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
; stest_f16i32: half variant of stest_f64i32. Without native half support the
; input is promoted via __gnu_h2f_ieee first, then converted and clamped to
; the signed i32 range. CHECK lines are autogenerated - do not edit by hand.
define i32 @stest_f16i32(half %x) {
; RV32-LABEL: stest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __gnu_h2f_ieee@plt
; RV32-NEXT: call __fixsfdi@plt
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB6_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a1, 0
; RV32-NEXT: beqz a4, .LBB6_3
; RV32-NEXT: j .LBB6_4
; RV32-NEXT: .LBB6_2:
; RV32-NEXT: sltu a4, a0, a3
; RV32-NEXT: bnez a4, .LBB6_4
; RV32-NEXT: .LBB6_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: mv a0, a3
; RV32-NEXT: .LBB6_4: # %entry
; RV32-NEXT: li a3, -1
; RV32-NEXT: beq a1, a3, .LBB6_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: slt a1, a3, a1
; RV32-NEXT: beqz a1, .LBB6_7
; RV32-NEXT: j .LBB6_8
; RV32-NEXT: .LBB6_6:
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: bnez a1, .LBB6_8
; RV32-NEXT: .LBB6_7: # %entry
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: .LBB6_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __gnu_h2f_ieee@plt
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: blt a0, a2, .LBB6_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: .LBB6_2: # %entry
; RV64-NEXT: blt a1, a0, .LBB6_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: .LBB6_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
; utesth_f16i32: half variant of utest_f64i32. Promotes half to float via
; __gnu_h2f_ieee, converts unsigned, and clamps to UINT32_MAX.
; CHECK lines are autogenerated - do not edit by hand.
define i32 @utesth_f16i32(half %x) {
; RV32-LABEL: utesth_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __gnu_h2f_ieee@plt
; RV32-NEXT: call __fixunssfdi@plt
; RV32-NEXT: beqz a1, .LBB7_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: beqz a1, .LBB7_3
; RV32-NEXT: j .LBB7_4
; RV32-NEXT: .LBB7_2:
; RV32-NEXT: addi a1, a0, 1
; RV32-NEXT: snez a1, a1
; RV32-NEXT: bnez a1, .LBB7_4
; RV32-NEXT: .LBB7_3: # %entry
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB7_4: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __gnu_h2f_ieee@plt
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.lu.s a0, ft0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB7_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB7_2: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}
| |
; ustest_f16i32: half variant of ustest_f64i32. Promotes half to float via
; __gnu_h2f_ieee, converts signed, and clamps to [0, 4294967295].
; CHECK lines are autogenerated - do not edit by hand.
define i32 @ustest_f16i32(half %x) {
; RV32-LABEL: ustest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __gnu_h2f_ieee@plt
; RV32-NEXT: call __fixsfdi@plt
; RV32-NEXT: beqz a1, .LBB8_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
; RV32-NEXT: beqz a2, .LBB8_3
; RV32-NEXT: j .LBB8_4
; RV32-NEXT: .LBB8_2:
; RV32-NEXT: addi a2, a0, 1
; RV32-NEXT: snez a2, a2
; RV32-NEXT: bnez a2, .LBB8_4
; RV32-NEXT: .LBB8_3: # %entry
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a0, -1
; RV32-NEXT: .LBB8_4: # %entry
; RV32-NEXT: beqz a1, .LBB8_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: sgtz a1, a1
; RV32-NEXT: beqz a1, .LBB8_7
; RV32-NEXT: j .LBB8_8
; RV32-NEXT: .LBB8_6:
; RV32-NEXT: snez a1, a0
; RV32-NEXT: bnez a1, .LBB8_8
; RV32-NEXT: .LBB8_7: # %entry
; RV32-NEXT: li a0, 0
; RV32-NEXT: .LBB8_8: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __gnu_h2f_ieee@plt
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: blt a0, a1, .LBB8_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB8_2: # %entry
; RV64-NEXT: bgtz a0, .LBB8_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: li a0, 0
; RV64-NEXT: .LBB8_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}
| |
| ; i16 saturate |
| |
; stest_f64i16: signed saturation to i16. fptosi double -> i32, clamp to
; [-32768, 32767] via two icmp+select pairs, then truncate to i16. This
; function has four check prefixes (RV32IF/RV64IF soft-double via libcall,
; RV32IFD/RV64IFD hardware fcvt). CHECK lines are autogenerated.
define i16 @stest_f64i16(double %x) {
; RV32IF-LABEL: stest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi@plt
; RV32IF-NEXT: lui a1, 8
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB9_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB9_2: # %entry
; RV32IF-NEXT: lui a1, 1048568
; RV32IF-NEXT: blt a1, a0, .LBB9_4
; RV32IF-NEXT: # %bb.3: # %entry
; RV32IF-NEXT: lui a0, 1048568
; RV32IF-NEXT: .LBB9_4: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi@plt
; RV64IF-NEXT: lui a1, 8
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB9_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB9_2: # %entry
; RV64IF-NEXT: lui a1, 1048568
; RV64IF-NEXT: blt a1, a0, .LBB9_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 1048568
; RV64IF-NEXT: .LBB9_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV32IFD-NEXT: lui a1, 8
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bge a0, a1, .LBB9_3
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: bge a1, a0, .LBB9_4
; RV32IFD-NEXT: .LBB9_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB9_3: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: blt a1, a0, .LBB9_2
; RV32IFD-NEXT: .LBB9_4: # %entry
; RV32IFD-NEXT: lui a0, 1048568
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV64IFD-NEXT: lui a1, 8
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bge a0, a1, .LBB9_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: bge a1, a0, .LBB9_4
; RV64IFD-NEXT: .LBB9_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB9_3: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: blt a1, a0, .LBB9_2
; RV64IFD-NEXT: .LBB9_4: # %entry
; RV64IFD-NEXT: lui a0, 1048568
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}
| |
; utest_f64i16: unsigned saturation to i16. fptoui double -> i32, clamp to
; at most 65535 (UINT16_MAX) with one icmp ult + select, then truncate.
; CHECK lines are autogenerated - do not edit by hand.
define i16 @utest_f64i16(double %x) {
; RV32IF-LABEL: utest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunsdfsi@plt
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: bltu a0, a1, .LBB10_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB10_2: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: utest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfsi@plt
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: bltu a0, a1, .LBB10_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB10_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bltu a0, a1, .LBB10_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: .LBB10_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.wu.d a0, ft0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bltu a0, a1, .LBB10_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB10_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}
| |
; ustest_f64i16: signed-input unsigned saturation to i16. fptosi double ->
; i32, clamp to [0, 65535] via two icmp+select pairs, then truncate.
; CHECK lines are autogenerated - do not edit by hand.
define i16 @ustest_f64i16(double %x) {
; RV32IF-LABEL: ustest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi@plt
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB11_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB11_2: # %entry
; RV32IF-NEXT: bgtz a0, .LBB11_4
; RV32IF-NEXT: # %bb.3: # %entry
; RV32IF-NEXT: li a0, 0
; RV32IF-NEXT: .LBB11_4: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi@plt
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB11_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB11_2: # %entry
; RV64IF-NEXT: bgtz a0, .LBB11_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: li a0, 0
; RV64IF-NEXT: .LBB11_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bge a0, a1, .LBB11_3
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: blez a0, .LBB11_4
; RV32IFD-NEXT: .LBB11_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB11_3: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: bgtz a0, .LBB11_2
; RV32IFD-NEXT: .LBB11_4: # %entry
; RV32IFD-NEXT: li a0, 0
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bge a0, a1, .LBB11_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: blez a0, .LBB11_4
; RV64IFD-NEXT: .LBB11_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB11_3: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: bgtz a0, .LBB11_2
; RV64IFD-NEXT: .LBB11_4: # %entry
; RV64IFD-NEXT: li a0, 0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}
| |
; stest_f32i16: float variant of stest_f64i16. fptosi float -> i32, clamp to
; [-32768, 32767], then truncate to i16. CHECK lines are autogenerated.
define i16 @stest_f32i16(float %x) {
; RV32-LABEL: stest_f32i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: fcvt.w.s a0, ft0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bge a0, a1, .LBB12_3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: bge a1, a0, .LBB12_4
; RV32-NEXT: .LBB12_2: # %entry
; RV32-NEXT: ret
; RV32-NEXT: .LBB12_3: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: blt a1, a0, .LBB12_2
; RV32-NEXT: .LBB12_4: # %entry
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.w.s a0, ft0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bge a0, a1, .LBB12_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: bge a1, a0, .LBB12_4
; RV64-NEXT: .LBB12_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB12_3: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: blt a1, a0, .LBB12_2
; RV64-NEXT: .LBB12_4: # %entry
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}
| |
; utest_f32i16: float variant of utest_f64i16. fptoui float -> i32, clamp to
; at most 65535 (UINT16_MAX), then truncate. CHECK lines are autogenerated.
define i16 @utest_f32i16(float %x) {
; RV32-LABEL: utest_f32i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: fcvt.wu.s a0, ft0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bltu a0, a1, .LBB13_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB13_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: fcvt.wu.s a0, ft0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bltu a0, a1, .LBB13_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB13_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}
| |
| define i16 @ustest_f32i16(float %x) { |
| ; Mixed saturation: fptosi float->i32 (signed), clamp to [0, 65535], truncate to i16. |
| ; RV32-LABEL: ustest_f32i16: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bge a0, a1, .LBB14_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: blez a0, .LBB14_4 |
| ; RV32-NEXT: .LBB14_2: # %entry |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB14_3: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: bgtz a0, .LBB14_2 |
| ; RV32-NEXT: .LBB14_4: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f32i16: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bge a0, a1, .LBB14_3 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: blez a0, .LBB14_4 |
| ; RV64-NEXT: .LBB14_2: # %entry |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB14_3: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: bgtz a0, .LBB14_2 |
| ; RV64-NEXT: .LBB14_4: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi float %x to i32 |
| %0 = icmp slt i32 %conv, 65535 |
| %spec.store.select = select i1 %0, i32 %conv, i32 65535 |
| %1 = icmp sgt i32 %spec.store.select, 0 |
| %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0 |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| define i16 @stest_f16i16(half %x) { |
| ; Signed saturation: half is converted via the __gnu_h2f_ieee libcall (no native fp16), |
| ; then fptosi to i32, clamped to [-32768, 32767] and truncated to i16. |
| ; RV32-LABEL: stest_f16i16: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 8 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: blt a0, a1, .LBB15_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB15_2: # %entry |
| ; RV32-NEXT: lui a1, 1048568 |
| ; RV32-NEXT: blt a1, a0, .LBB15_4 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: lui a0, 1048568 |
| ; RV32-NEXT: .LBB15_4: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f16i16: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 8 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: blt a0, a1, .LBB15_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB15_2: # %entry |
| ; RV64-NEXT: lui a1, 1048568 |
| ; RV64-NEXT: blt a1, a0, .LBB15_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: lui a0, 1048568 |
| ; RV64-NEXT: .LBB15_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi half %x to i32 |
| %0 = icmp slt i32 %conv, 32767 |
| %spec.store.select = select i1 %0, i32 %conv, i32 32767 |
| %1 = icmp sgt i32 %spec.store.select, -32768 |
| %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768 |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| define i16 @utesth_f16i16(half %x) { |
| ; Unsigned saturation from half: h2f libcall, fptoui to i32, clamp to at most 65535, trunc to i16. |
| ; RV32-LABEL: utesth_f16i16: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.wu.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bltu a0, a1, .LBB16_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB16_2: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utesth_f16i16: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.lu.s a0, ft0, rtz |
| ; RV64-NEXT: sext.w a2, a0 |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bltu a2, a1, .LBB16_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB16_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui half %x to i32 |
| %0 = icmp ult i32 %conv, 65535 |
| %spec.store.select = select i1 %0, i32 %conv, i32 65535 |
| %conv6 = trunc i32 %spec.store.select to i16 |
| ret i16 %conv6 |
| } |
| |
| define i16 @ustest_f16i16(half %x) { |
| ; Mixed saturation from half: h2f libcall, signed fptosi to i32, clamp to [0, 65535], trunc to i16. |
| ; RV32-LABEL: ustest_f16i16: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: blt a0, a1, .LBB17_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB17_2: # %entry |
| ; RV32-NEXT: bgtz a0, .LBB17_4 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: .LBB17_4: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f16i16: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: blt a0, a1, .LBB17_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB17_2: # %entry |
| ; RV64-NEXT: bgtz a0, .LBB17_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB17_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi half %x to i32 |
| %0 = icmp slt i32 %conv, 65535 |
| %spec.store.select = select i1 %0, i32 %conv, i32 65535 |
| %1 = icmp sgt i32 %spec.store.select, 0 |
| %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0 |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| ; i64 saturate: convert through i128 and clamp the result back into 64-bit range |
| |
| define i64 @stest_f64i64(double %x) { |
| ; Signed i64 saturation: fptosi double->i128 via the __fixdfti libcall, clamp to |
| ; [INT64_MIN, INT64_MAX], then truncate to i64. On RV32 the i128 result is returned |
| ; via an sret-style stack slot (sp+8..sp+23) and compared limb by limb. |
| ; RV32-LABEL: stest_f64i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixdfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a5, a7, -1 |
| ; RV32-NEXT: beq a1, a5, .LBB18_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: sltu a4, a1, a5 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: bnez a3, .LBB18_3 |
| ; RV32-NEXT: j .LBB18_4 |
| ; RV32-NEXT: .LBB18_2: |
| ; RV32-NEXT: addi a4, a0, 1 |
| ; RV32-NEXT: snez a4, a4 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: beqz a3, .LBB18_4 |
| ; RV32-NEXT: .LBB18_3: # %entry |
| ; RV32-NEXT: slti a4, a2, 0 |
| ; RV32-NEXT: .LBB18_4: # %entry |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: beqz a4, .LBB18_7 |
| ; RV32-NEXT: # %bb.5: # %entry |
| ; RV32-NEXT: beq a1, a7, .LBB18_8 |
| ; RV32-NEXT: .LBB18_6: # %entry |
| ; RV32-NEXT: sltu a4, a7, a1 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: bne a3, a6, .LBB18_9 |
| ; RV32-NEXT: j .LBB18_10 |
| ; RV32-NEXT: .LBB18_7: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li t0, 0 |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB18_6 |
| ; RV32-NEXT: .LBB18_8: |
| ; RV32-NEXT: snez a4, a0 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: beq a3, a6, .LBB18_10 |
| ; RV32-NEXT: .LBB18_9: # %entry |
| ; RV32-NEXT: slt a4, a6, a2 |
| ; RV32-NEXT: .LBB18_10: # %entry |
| ; RV32-NEXT: bnez a4, .LBB18_12 |
| ; RV32-NEXT: # %bb.11: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: .LBB18_12: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f64i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixdfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a3, a2, 1 |
| ; RV64-NEXT: beqz a1, .LBB18_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: slti a4, a1, 0 |
| ; RV64-NEXT: beqz a4, .LBB18_3 |
| ; RV64-NEXT: j .LBB18_4 |
| ; RV64-NEXT: .LBB18_2: |
| ; RV64-NEXT: sltu a4, a0, a3 |
| ; RV64-NEXT: bnez a4, .LBB18_4 |
| ; RV64-NEXT: .LBB18_3: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB18_4: # %entry |
| ; RV64-NEXT: slli a3, a2, 63 |
| ; RV64-NEXT: beq a1, a2, .LBB18_6 |
| ; RV64-NEXT: # %bb.5: # %entry |
| ; RV64-NEXT: slt a1, a2, a1 |
| ; RV64-NEXT: beqz a1, .LBB18_7 |
| ; RV64-NEXT: j .LBB18_8 |
| ; RV64-NEXT: .LBB18_6: |
| ; RV64-NEXT: sltu a1, a3, a0 |
| ; RV64-NEXT: bnez a1, .LBB18_8 |
| ; RV64-NEXT: .LBB18_7: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB18_8: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi double %x to i128 |
| %0 = icmp slt i128 %conv, 9223372036854775807 |
| %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 |
| %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 |
| %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @utest_f64i64(double %x) { |
| ; Unsigned i64 saturation: fptoui double->i128 via __fixunsdfti, clamp to at most |
| ; 2^64, truncate to i64. NOTE(review): the clamp constant 18446744073709551616 is |
| ; 2^64, which truncates to 0 in i64 — presumably intentional so the IR matches the |
| ; unsigned-saturation pattern; confirm against the test generator. |
| ; RV32-LABEL: utest_f64i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunsdfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a1, 16(sp) |
| ; RV32-NEXT: beqz a0, .LBB19_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: j .LBB19_3 |
| ; RV32-NEXT: .LBB19_2: |
| ; RV32-NEXT: seqz a2, a1 |
| ; RV32-NEXT: .LBB19_3: # %entry |
| ; RV32-NEXT: xori a1, a1, 1 |
| ; RV32-NEXT: or a1, a1, a0 |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: beqz a1, .LBB19_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB19_5: # %entry |
| ; RV32-NEXT: bnez a0, .LBB19_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: j .LBB19_8 |
| ; RV32-NEXT: .LBB19_7: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: .LBB19_8: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f64i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixunsdfti@plt |
| ; RV64-NEXT: beqz a1, .LBB19_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB19_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui double %x to i128 |
| %0 = icmp ult i128 %conv, 18446744073709551616 |
| %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @ustest_f64i64(double %x) { |
| ; Mixed i64 saturation: signed fptosi double->i128 via __fixdfti, clamp above at |
| ; 2^64 and below at 0, truncate to i64 (2^64 truncates to 0 — see utest_f64i64). |
| ; RV32-LABEL: ustest_f64i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixdfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: beqz a2, .LBB20_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: slti a0, a2, 0 |
| ; RV32-NEXT: j .LBB20_3 |
| ; RV32-NEXT: .LBB20_2: |
| ; RV32-NEXT: seqz a0, a3 |
| ; RV32-NEXT: .LBB20_3: # %entry |
| ; RV32-NEXT: xori a1, a3, 1 |
| ; RV32-NEXT: or a4, a1, a2 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a4, .LBB20_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: .LBB20_5: # %entry |
| ; RV32-NEXT: bnez a1, .LBB20_9 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB20_10 |
| ; RV32-NEXT: .LBB20_7: |
| ; RV32-NEXT: snez a4, a3 |
| ; RV32-NEXT: bnez a1, .LBB20_11 |
| ; RV32-NEXT: .LBB20_8: |
| ; RV32-NEXT: snez a5, a0 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB20_12 |
| ; RV32-NEXT: j .LBB20_13 |
| ; RV32-NEXT: .LBB20_9: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: beqz a2, .LBB20_7 |
| ; RV32-NEXT: .LBB20_10: # %entry |
| ; RV32-NEXT: sgtz a4, a2 |
| ; RV32-NEXT: beqz a1, .LBB20_8 |
| ; RV32-NEXT: .LBB20_11: # %entry |
| ; RV32-NEXT: snez a5, a1 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: beqz a2, .LBB20_13 |
| ; RV32-NEXT: .LBB20_12: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: .LBB20_13: # %entry |
| ; RV32-NEXT: bnez a5, .LBB20_15 |
| ; RV32-NEXT: # %bb.14: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: .LBB20_15: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f64i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixdfti@plt |
| ; RV64-NEXT: blez a1, .LBB20_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: li a1, 1 |
| ; RV64-NEXT: .LBB20_2: # %entry |
| ; RV64-NEXT: beqz a1, .LBB20_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: sgtz a1, a1 |
| ; RV64-NEXT: beqz a1, .LBB20_5 |
| ; RV64-NEXT: j .LBB20_6 |
| ; RV64-NEXT: .LBB20_4: |
| ; RV64-NEXT: snez a1, a0 |
| ; RV64-NEXT: bnez a1, .LBB20_6 |
| ; RV64-NEXT: .LBB20_5: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB20_6: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi double %x to i128 |
| %0 = icmp slt i128 %conv, 18446744073709551616 |
| %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
| %1 = icmp sgt i128 %spec.store.select, 0 |
| %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @stest_f32i64(float %x) { |
| ; Signed i64 saturation: fptosi float->i128 via __fixsfti, clamp to |
| ; [INT64_MIN, INT64_MAX], truncate to i64. Same shape as stest_f64i64 but float source. |
| ; RV32-LABEL: stest_f32i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a5, a7, -1 |
| ; RV32-NEXT: beq a1, a5, .LBB21_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: sltu a4, a1, a5 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: bnez a3, .LBB21_3 |
| ; RV32-NEXT: j .LBB21_4 |
| ; RV32-NEXT: .LBB21_2: |
| ; RV32-NEXT: addi a4, a0, 1 |
| ; RV32-NEXT: snez a4, a4 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: beqz a3, .LBB21_4 |
| ; RV32-NEXT: .LBB21_3: # %entry |
| ; RV32-NEXT: slti a4, a2, 0 |
| ; RV32-NEXT: .LBB21_4: # %entry |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: beqz a4, .LBB21_7 |
| ; RV32-NEXT: # %bb.5: # %entry |
| ; RV32-NEXT: beq a1, a7, .LBB21_8 |
| ; RV32-NEXT: .LBB21_6: # %entry |
| ; RV32-NEXT: sltu a4, a7, a1 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: bne a3, a6, .LBB21_9 |
| ; RV32-NEXT: j .LBB21_10 |
| ; RV32-NEXT: .LBB21_7: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li t0, 0 |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB21_6 |
| ; RV32-NEXT: .LBB21_8: |
| ; RV32-NEXT: snez a4, a0 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: beq a3, a6, .LBB21_10 |
| ; RV32-NEXT: .LBB21_9: # %entry |
| ; RV32-NEXT: slt a4, a6, a2 |
| ; RV32-NEXT: .LBB21_10: # %entry |
| ; RV32-NEXT: bnez a4, .LBB21_12 |
| ; RV32-NEXT: # %bb.11: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: .LBB21_12: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f32i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a3, a2, 1 |
| ; RV64-NEXT: beqz a1, .LBB21_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: slti a4, a1, 0 |
| ; RV64-NEXT: beqz a4, .LBB21_3 |
| ; RV64-NEXT: j .LBB21_4 |
| ; RV64-NEXT: .LBB21_2: |
| ; RV64-NEXT: sltu a4, a0, a3 |
| ; RV64-NEXT: bnez a4, .LBB21_4 |
| ; RV64-NEXT: .LBB21_3: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB21_4: # %entry |
| ; RV64-NEXT: slli a3, a2, 63 |
| ; RV64-NEXT: beq a1, a2, .LBB21_6 |
| ; RV64-NEXT: # %bb.5: # %entry |
| ; RV64-NEXT: slt a1, a2, a1 |
| ; RV64-NEXT: beqz a1, .LBB21_7 |
| ; RV64-NEXT: j .LBB21_8 |
| ; RV64-NEXT: .LBB21_6: |
| ; RV64-NEXT: sltu a1, a3, a0 |
| ; RV64-NEXT: bnez a1, .LBB21_8 |
| ; RV64-NEXT: .LBB21_7: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB21_8: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi float %x to i128 |
| %0 = icmp slt i128 %conv, 9223372036854775807 |
| %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 |
| %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 |
| %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @utest_f32i64(float %x) { |
| ; Unsigned i64 saturation: fptoui float->i128 via __fixunssfti, clamp to at most |
| ; 2^64, truncate to i64 (the 2^64 clamp constant truncates to 0 — see utest_f64i64). |
| ; RV32-LABEL: utest_f32i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunssfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a1, 16(sp) |
| ; RV32-NEXT: beqz a0, .LBB22_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: j .LBB22_3 |
| ; RV32-NEXT: .LBB22_2: |
| ; RV32-NEXT: seqz a2, a1 |
| ; RV32-NEXT: .LBB22_3: # %entry |
| ; RV32-NEXT: xori a1, a1, 1 |
| ; RV32-NEXT: or a1, a1, a0 |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: beqz a1, .LBB22_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB22_5: # %entry |
| ; RV32-NEXT: bnez a0, .LBB22_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: j .LBB22_8 |
| ; RV32-NEXT: .LBB22_7: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: .LBB22_8: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f32i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixunssfti@plt |
| ; RV64-NEXT: beqz a1, .LBB22_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB22_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui float %x to i128 |
| %0 = icmp ult i128 %conv, 18446744073709551616 |
| %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @ustest_f32i64(float %x) { |
| ; Mixed i64 saturation: signed fptosi float->i128 via __fixsfti, clamp above at |
| ; 2^64 and below at 0, truncate to i64. Same shape as ustest_f64i64 but float source. |
| ; RV32-LABEL: ustest_f32i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: beqz a2, .LBB23_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: slti a0, a2, 0 |
| ; RV32-NEXT: j .LBB23_3 |
| ; RV32-NEXT: .LBB23_2: |
| ; RV32-NEXT: seqz a0, a3 |
| ; RV32-NEXT: .LBB23_3: # %entry |
| ; RV32-NEXT: xori a1, a3, 1 |
| ; RV32-NEXT: or a4, a1, a2 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a4, .LBB23_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: .LBB23_5: # %entry |
| ; RV32-NEXT: bnez a1, .LBB23_9 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB23_10 |
| ; RV32-NEXT: .LBB23_7: |
| ; RV32-NEXT: snez a4, a3 |
| ; RV32-NEXT: bnez a1, .LBB23_11 |
| ; RV32-NEXT: .LBB23_8: |
| ; RV32-NEXT: snez a5, a0 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB23_12 |
| ; RV32-NEXT: j .LBB23_13 |
| ; RV32-NEXT: .LBB23_9: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: beqz a2, .LBB23_7 |
| ; RV32-NEXT: .LBB23_10: # %entry |
| ; RV32-NEXT: sgtz a4, a2 |
| ; RV32-NEXT: beqz a1, .LBB23_8 |
| ; RV32-NEXT: .LBB23_11: # %entry |
| ; RV32-NEXT: snez a5, a1 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: beqz a2, .LBB23_13 |
| ; RV32-NEXT: .LBB23_12: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: .LBB23_13: # %entry |
| ; RV32-NEXT: bnez a5, .LBB23_15 |
| ; RV32-NEXT: # %bb.14: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: .LBB23_15: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f32i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: blez a1, .LBB23_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: li a1, 1 |
| ; RV64-NEXT: .LBB23_2: # %entry |
| ; RV64-NEXT: beqz a1, .LBB23_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: sgtz a1, a1 |
| ; RV64-NEXT: beqz a1, .LBB23_5 |
| ; RV64-NEXT: j .LBB23_6 |
| ; RV64-NEXT: .LBB23_4: |
| ; RV64-NEXT: snez a1, a0 |
| ; RV64-NEXT: bnez a1, .LBB23_6 |
| ; RV64-NEXT: .LBB23_5: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB23_6: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi float %x to i128 |
| %0 = icmp slt i128 %conv, 18446744073709551616 |
| %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
| %1 = icmp sgt i128 %spec.store.select, 0 |
| %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @stest_f16i64(half %x) { |
| ; Signed i64 saturation from half: h2f libcall first, then fptosi float->i128 via |
| ; __fixsfti, clamp to [INT64_MIN, INT64_MAX], truncate to i64. |
| ; RV32-LABEL: stest_f16i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a5, a7, -1 |
| ; RV32-NEXT: beq a1, a5, .LBB24_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: sltu a4, a1, a5 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: bnez a3, .LBB24_3 |
| ; RV32-NEXT: j .LBB24_4 |
| ; RV32-NEXT: .LBB24_2: |
| ; RV32-NEXT: addi a4, a0, 1 |
| ; RV32-NEXT: snez a4, a4 |
| ; RV32-NEXT: or a3, t0, a2 |
| ; RV32-NEXT: beqz a3, .LBB24_4 |
| ; RV32-NEXT: .LBB24_3: # %entry |
| ; RV32-NEXT: slti a4, a2, 0 |
| ; RV32-NEXT: .LBB24_4: # %entry |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: beqz a4, .LBB24_7 |
| ; RV32-NEXT: # %bb.5: # %entry |
| ; RV32-NEXT: beq a1, a7, .LBB24_8 |
| ; RV32-NEXT: .LBB24_6: # %entry |
| ; RV32-NEXT: sltu a4, a7, a1 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: bne a3, a6, .LBB24_9 |
| ; RV32-NEXT: j .LBB24_10 |
| ; RV32-NEXT: .LBB24_7: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li t0, 0 |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB24_6 |
| ; RV32-NEXT: .LBB24_8: |
| ; RV32-NEXT: snez a4, a0 |
| ; RV32-NEXT: and a3, t0, a2 |
| ; RV32-NEXT: beq a3, a6, .LBB24_10 |
| ; RV32-NEXT: .LBB24_9: # %entry |
| ; RV32-NEXT: slt a4, a6, a2 |
| ; RV32-NEXT: .LBB24_10: # %entry |
| ; RV32-NEXT: bnez a4, .LBB24_12 |
| ; RV32-NEXT: # %bb.11: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: .LBB24_12: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f16i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a3, a2, 1 |
| ; RV64-NEXT: beqz a1, .LBB24_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: slti a4, a1, 0 |
| ; RV64-NEXT: beqz a4, .LBB24_3 |
| ; RV64-NEXT: j .LBB24_4 |
| ; RV64-NEXT: .LBB24_2: |
| ; RV64-NEXT: sltu a4, a0, a3 |
| ; RV64-NEXT: bnez a4, .LBB24_4 |
| ; RV64-NEXT: .LBB24_3: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB24_4: # %entry |
| ; RV64-NEXT: slli a3, a2, 63 |
| ; RV64-NEXT: beq a1, a2, .LBB24_6 |
| ; RV64-NEXT: # %bb.5: # %entry |
| ; RV64-NEXT: slt a1, a2, a1 |
| ; RV64-NEXT: beqz a1, .LBB24_7 |
| ; RV64-NEXT: j .LBB24_8 |
| ; RV64-NEXT: .LBB24_6: |
| ; RV64-NEXT: sltu a1, a3, a0 |
| ; RV64-NEXT: bnez a1, .LBB24_8 |
| ; RV64-NEXT: .LBB24_7: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB24_8: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi half %x to i128 |
| %0 = icmp slt i128 %conv, 9223372036854775807 |
| %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 |
| %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 |
| %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| define i64 @utesth_f16i64(half %x) { |
| ; Unsigned i64 saturation from half: h2f libcall, fptoui float->i128 via |
| ; __fixunssfti, clamp to at most 2^64, truncate to i64 (see utest_f64i64 note). |
| ; RV32-LABEL: utesth_f16i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunssfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a1, 16(sp) |
| ; RV32-NEXT: beqz a0, .LBB25_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: j .LBB25_3 |
| ; RV32-NEXT: .LBB25_2: |
| ; RV32-NEXT: seqz a2, a1 |
| ; RV32-NEXT: .LBB25_3: # %entry |
| ; RV32-NEXT: xori a1, a1, 1 |
| ; RV32-NEXT: or a1, a1, a0 |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: beqz a1, .LBB25_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB25_5: # %entry |
| ; RV32-NEXT: bnez a0, .LBB25_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: j .LBB25_8 |
| ; RV32-NEXT: .LBB25_7: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: .LBB25_8: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utesth_f16i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixunssfti@plt |
| ; RV64-NEXT: beqz a1, .LBB25_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB25_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui half %x to i128 |
| %0 = icmp ult i128 %conv, 18446744073709551616 |
| %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| ; half -> i64: signed clamp of the half->i128 conversion to [0, 2^64] by |
| ; compare+select (the upper bound 2^64 truncates to 0 in the final i64). |
| ; RV32 calls __gnu_h2f_ieee then __fixsfti with the i128 result returned |
| ; indirectly through a stack slot (a0 = sp+8); RV64 gets it in a0/a1. |
| define i64 @ustest_f16i64(half %x) { |
| ; RV32-LABEL: ustest_f16i64: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: beqz a2, .LBB26_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: slti a0, a2, 0 |
| ; RV32-NEXT: j .LBB26_3 |
| ; RV32-NEXT: .LBB26_2: |
| ; RV32-NEXT: seqz a0, a3 |
| ; RV32-NEXT: .LBB26_3: # %entry |
| ; RV32-NEXT: xori a1, a3, 1 |
| ; RV32-NEXT: or a4, a1, a2 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a4, .LBB26_5 |
| ; RV32-NEXT: # %bb.4: # %entry |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: .LBB26_5: # %entry |
| ; RV32-NEXT: bnez a1, .LBB26_9 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB26_10 |
| ; RV32-NEXT: .LBB26_7: |
| ; RV32-NEXT: snez a4, a3 |
| ; RV32-NEXT: bnez a1, .LBB26_11 |
| ; RV32-NEXT: .LBB26_8: |
| ; RV32-NEXT: snez a5, a0 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB26_12 |
| ; RV32-NEXT: j .LBB26_13 |
| ; RV32-NEXT: .LBB26_9: |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: lw a0, 8(sp) |
| ; RV32-NEXT: beqz a2, .LBB26_7 |
| ; RV32-NEXT: .LBB26_10: # %entry |
| ; RV32-NEXT: sgtz a4, a2 |
| ; RV32-NEXT: beqz a1, .LBB26_8 |
| ; RV32-NEXT: .LBB26_11: # %entry |
| ; RV32-NEXT: snez a5, a1 |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: beqz a2, .LBB26_13 |
| ; RV32-NEXT: .LBB26_12: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: .LBB26_13: # %entry |
| ; RV32-NEXT: bnez a5, .LBB26_15 |
| ; RV32-NEXT: # %bb.14: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: .LBB26_15: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f16i64: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: blez a1, .LBB26_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: li a1, 1 |
| ; RV64-NEXT: .LBB26_2: # %entry |
| ; RV64-NEXT: beqz a1, .LBB26_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: sgtz a1, a1 |
| ; RV64-NEXT: beqz a1, .LBB26_5 |
| ; RV64-NEXT: j .LBB26_6 |
| ; RV64-NEXT: .LBB26_4: |
| ; RV64-NEXT: snez a1, a0 |
| ; RV64-NEXT: bnez a1, .LBB26_6 |
| ; RV64-NEXT: .LBB26_5: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB26_6: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptosi half %x to i128 |
|   %0 = icmp slt i128 %conv, 18446744073709551616 |
|   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 |
|   %1 = icmp sgt i128 %spec.store.select, 0 |
|   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 |
|   %conv6 = trunc i128 %spec.store.select7 to i64 |
|   ret i64 %conv6 |
| } |
| |
| |
| |
| |
| ; i32 saturate |
| |
| ; double -> i32 with signed saturation expressed via the llvm.smin/llvm.smax |
| ; intrinsics (the "_mm" variants) instead of icmp+select. RV32 lowers through |
| ; the __fixdfdi libcall and branchy two-word compares; RV64IF goes through |
| ; __fixdfdi too, while RV64IFD converts directly with fcvt.l.d. |
| define i32 @stest_f64i32_mm(double %x) { |
| ; RV32-LABEL: stest_f64i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixdfdi@plt |
| ; RV32-NEXT: lui a2, 524288 |
| ; RV32-NEXT: addi a4, a2, -1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB27_9 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bgeu a0, a4, .LBB27_10 |
| ; RV32-NEXT: .LBB27_2: # %entry |
| ; RV32-NEXT: bnez a1, .LBB27_11 |
| ; RV32-NEXT: .LBB27_3: # %entry |
| ; RV32-NEXT: bgez a1, .LBB27_12 |
| ; RV32-NEXT: .LBB27_4: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltz a1, .LBB27_13 |
| ; RV32-NEXT: .LBB27_5: # %entry |
| ; RV32-NEXT: bgeu a2, a0, .LBB27_14 |
| ; RV32-NEXT: .LBB27_6: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beq a1, a2, .LBB27_8 |
| ; RV32-NEXT: .LBB27_7: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: .LBB27_8: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB27_9: # %entry |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: bltu a0, a4, .LBB27_2 |
| ; RV32-NEXT: .LBB27_10: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB27_3 |
| ; RV32-NEXT: .LBB27_11: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: bltz a1, .LBB27_4 |
| ; RV32-NEXT: .LBB27_12: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB27_5 |
| ; RV32-NEXT: .LBB27_13: # %entry |
| ; RV32-NEXT: lui a3, 524288 |
| ; RV32-NEXT: bltu a2, a0, .LBB27_6 |
| ; RV32-NEXT: .LBB27_14: # %entry |
| ; RV32-NEXT: lui a0, 524288 |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: bne a1, a2, .LBB27_7 |
| ; RV32-NEXT: j .LBB27_8 |
| ; |
| ; RV64IF-LABEL: stest_f64i32_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixdfdi@plt |
| ; RV64IF-NEXT: lui a1, 524288 |
| ; RV64IF-NEXT: addiw a2, a1, -1 |
| ; RV64IF-NEXT: blt a0, a2, .LBB27_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a2 |
| ; RV64IF-NEXT: .LBB27_2: # %entry |
| ; RV64IF-NEXT: blt a1, a0, .LBB27_4 |
| ; RV64IF-NEXT: # %bb.3: # %entry |
| ; RV64IF-NEXT: lui a0, 524288 |
| ; RV64IF-NEXT: .LBB27_4: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: stest_f64i32_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz |
| ; RV64IFD-NEXT: lui a1, 524288 |
| ; RV64IFD-NEXT: addiw a2, a1, -1 |
| ; RV64IFD-NEXT: bge a0, a2, .LBB27_3 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: bge a1, a0, .LBB27_4 |
| ; RV64IFD-NEXT: .LBB27_2: # %entry |
| ; RV64IFD-NEXT: ret |
| ; RV64IFD-NEXT: .LBB27_3: # %entry |
| ; RV64IFD-NEXT: mv a0, a2 |
| ; RV64IFD-NEXT: blt a1, a0, .LBB27_2 |
| ; RV64IFD-NEXT: .LBB27_4: # %entry |
| ; RV64IFD-NEXT: lui a0, 524288 |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptosi double %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; double -> u32 saturation via llvm.umin.i64 with 0xffffffff. On RV32 only |
| ; the high word from __fixunsdfdi needs testing (nonzero high => clamp to -1). |
| define i32 @utest_f64i32_mm(double %x) { |
| ; RV32-LABEL: utest_f64i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixunsdfdi@plt |
| ; RV32-NEXT: beqz a1, .LBB28_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: .LBB28_2: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64IF-LABEL: utest_f64i32_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixunsdfdi@plt |
| ; RV64IF-NEXT: li a1, -1 |
| ; RV64IF-NEXT: srli a1, a1, 32 |
| ; RV64IF-NEXT: bltu a0, a1, .LBB28_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a1 |
| ; RV64IF-NEXT: .LBB28_2: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: utest_f64i32_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz |
| ; RV64IFD-NEXT: li a1, -1 |
| ; RV64IFD-NEXT: srli a1, a1, 32 |
| ; RV64IFD-NEXT: bltu a0, a1, .LBB28_2 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: mv a0, a1 |
| ; RV64IFD-NEXT: .LBB28_2: # %entry |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptoui double %x to i64 |
|   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) |
|   %conv6 = trunc i64 %spec.store.select to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; double -> i32 clamped to [0, 4294967295] with signed smin/smax intrinsics |
| ; (signed conversion via __fixdfdi / fcvt.l.d, then unsigned-range clamp). |
| define i32 @ustest_f64i32_mm(double %x) { |
| ; RV32-LABEL: ustest_f64i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixdfdi@plt |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgez a1, .LBB29_7 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bnez a1, .LBB29_8 |
| ; RV32-NEXT: .LBB29_2: # %entry |
| ; RV32-NEXT: bgez a1, .LBB29_9 |
| ; RV32-NEXT: .LBB29_3: # %entry |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: blez a1, .LBB29_10 |
| ; RV32-NEXT: .LBB29_4: # %entry |
| ; RV32-NEXT: beqz a1, .LBB29_6 |
| ; RV32-NEXT: .LBB29_5: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB29_6: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB29_7: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beqz a1, .LBB29_2 |
| ; RV32-NEXT: .LBB29_8: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: bltz a1, .LBB29_3 |
| ; RV32-NEXT: .LBB29_9: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgtz a1, .LBB29_4 |
| ; RV32-NEXT: .LBB29_10: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a1, .LBB29_5 |
| ; RV32-NEXT: j .LBB29_6 |
| ; |
| ; RV64IF-LABEL: ustest_f64i32_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixdfdi@plt |
| ; RV64IF-NEXT: li a1, -1 |
| ; RV64IF-NEXT: srli a1, a1, 32 |
| ; RV64IF-NEXT: blt a0, a1, .LBB29_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a1 |
| ; RV64IF-NEXT: .LBB29_2: # %entry |
| ; RV64IF-NEXT: bgtz a0, .LBB29_4 |
| ; RV64IF-NEXT: # %bb.3: # %entry |
| ; RV64IF-NEXT: li a0, 0 |
| ; RV64IF-NEXT: .LBB29_4: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: ustest_f64i32_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz |
| ; RV64IFD-NEXT: li a1, -1 |
| ; RV64IFD-NEXT: srli a1, a1, 32 |
| ; RV64IFD-NEXT: bge a0, a1, .LBB29_3 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: blez a0, .LBB29_4 |
| ; RV64IFD-NEXT: .LBB29_2: # %entry |
| ; RV64IFD-NEXT: ret |
| ; RV64IFD-NEXT: .LBB29_3: # %entry |
| ; RV64IFD-NEXT: mv a0, a1 |
| ; RV64IFD-NEXT: bgtz a0, .LBB29_2 |
| ; RV64IFD-NEXT: .LBB29_4: # %entry |
| ; RV64IFD-NEXT: li a0, 0 |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptosi double %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; float -> i32 signed saturation via smin/smax intrinsics. RV32 needs the |
| ; __fixsfdi libcall; RV64 (F is always present) converts with fcvt.l.s. |
| define i32 @stest_f32i32_mm(float %x) { |
| ; RV32-LABEL: stest_f32i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixsfdi@plt |
| ; RV32-NEXT: lui a2, 524288 |
| ; RV32-NEXT: addi a4, a2, -1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB30_9 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bgeu a0, a4, .LBB30_10 |
| ; RV32-NEXT: .LBB30_2: # %entry |
| ; RV32-NEXT: bnez a1, .LBB30_11 |
| ; RV32-NEXT: .LBB30_3: # %entry |
| ; RV32-NEXT: bgez a1, .LBB30_12 |
| ; RV32-NEXT: .LBB30_4: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltz a1, .LBB30_13 |
| ; RV32-NEXT: .LBB30_5: # %entry |
| ; RV32-NEXT: bgeu a2, a0, .LBB30_14 |
| ; RV32-NEXT: .LBB30_6: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beq a1, a2, .LBB30_8 |
| ; RV32-NEXT: .LBB30_7: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: .LBB30_8: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB30_9: # %entry |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: bltu a0, a4, .LBB30_2 |
| ; RV32-NEXT: .LBB30_10: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB30_3 |
| ; RV32-NEXT: .LBB30_11: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: bltz a1, .LBB30_4 |
| ; RV32-NEXT: .LBB30_12: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB30_5 |
| ; RV32-NEXT: .LBB30_13: # %entry |
| ; RV32-NEXT: lui a3, 524288 |
| ; RV32-NEXT: bltu a2, a0, .LBB30_6 |
| ; RV32-NEXT: .LBB30_14: # %entry |
| ; RV32-NEXT: lui a0, 524288 |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: bne a1, a2, .LBB30_7 |
| ; RV32-NEXT: j .LBB30_8 |
| ; |
| ; RV64-LABEL: stest_f32i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 524288 |
| ; RV64-NEXT: addiw a2, a1, -1 |
| ; RV64-NEXT: bge a0, a2, .LBB30_3 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: bge a1, a0, .LBB30_4 |
| ; RV64-NEXT: .LBB30_2: # %entry |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB30_3: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: blt a1, a0, .LBB30_2 |
| ; RV64-NEXT: .LBB30_4: # %entry |
| ; RV64-NEXT: lui a0, 524288 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptosi float %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; float -> u32 saturation via llvm.umin.i64 with 0xffffffff. |
| define i32 @utest_f32i32_mm(float %x) { |
| ; RV32-LABEL: utest_f32i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixunssfdi@plt |
| ; RV32-NEXT: beqz a1, .LBB31_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: .LBB31_2: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f32i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.lu.s a0, ft0, rtz |
| ; RV64-NEXT: li a1, -1 |
| ; RV64-NEXT: srli a1, a1, 32 |
| ; RV64-NEXT: bltu a0, a1, .LBB31_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB31_2: # %entry |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptoui float %x to i64 |
|   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) |
|   %conv6 = trunc i64 %spec.store.select to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; float -> i32 clamped to [0, 4294967295] with signed smin/smax intrinsics. |
| define i32 @ustest_f32i32_mm(float %x) { |
| ; RV32-LABEL: ustest_f32i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __fixsfdi@plt |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgez a1, .LBB32_7 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bnez a1, .LBB32_8 |
| ; RV32-NEXT: .LBB32_2: # %entry |
| ; RV32-NEXT: bgez a1, .LBB32_9 |
| ; RV32-NEXT: .LBB32_3: # %entry |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: blez a1, .LBB32_10 |
| ; RV32-NEXT: .LBB32_4: # %entry |
| ; RV32-NEXT: beqz a1, .LBB32_6 |
| ; RV32-NEXT: .LBB32_5: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB32_6: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB32_7: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beqz a1, .LBB32_2 |
| ; RV32-NEXT: .LBB32_8: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: bltz a1, .LBB32_3 |
| ; RV32-NEXT: .LBB32_9: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgtz a1, .LBB32_4 |
| ; RV32-NEXT: .LBB32_10: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a1, .LBB32_5 |
| ; RV32-NEXT: j .LBB32_6 |
| ; |
| ; RV64-LABEL: ustest_f32i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: li a1, -1 |
| ; RV64-NEXT: srli a1, a1, 32 |
| ; RV64-NEXT: bge a0, a1, .LBB32_3 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: blez a0, .LBB32_4 |
| ; RV64-NEXT: .LBB32_2: # %entry |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB32_3: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: bgtz a0, .LBB32_2 |
| ; RV64-NEXT: .LBB32_4: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptosi float %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; half -> i32 signed saturation: promote half to float via __gnu_h2f_ieee, |
| ; then convert (RV32: __fixsfdi libcall; RV64: fcvt.l.s) and clamp with |
| ; smin/smax intrinsics. |
| define i32 @stest_f16i32_mm(half %x) { |
| ; RV32-LABEL: stest_f16i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: call __fixsfdi@plt |
| ; RV32-NEXT: lui a2, 524288 |
| ; RV32-NEXT: addi a4, a2, -1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB33_9 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bgeu a0, a4, .LBB33_10 |
| ; RV32-NEXT: .LBB33_2: # %entry |
| ; RV32-NEXT: bnez a1, .LBB33_11 |
| ; RV32-NEXT: .LBB33_3: # %entry |
| ; RV32-NEXT: bgez a1, .LBB33_12 |
| ; RV32-NEXT: .LBB33_4: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltz a1, .LBB33_13 |
| ; RV32-NEXT: .LBB33_5: # %entry |
| ; RV32-NEXT: bgeu a2, a0, .LBB33_14 |
| ; RV32-NEXT: .LBB33_6: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beq a1, a2, .LBB33_8 |
| ; RV32-NEXT: .LBB33_7: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: .LBB33_8: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB33_9: # %entry |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: bltu a0, a4, .LBB33_2 |
| ; RV32-NEXT: .LBB33_10: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB33_3 |
| ; RV32-NEXT: .LBB33_11: # %entry |
| ; RV32-NEXT: mv a0, a3 |
| ; RV32-NEXT: bltz a1, .LBB33_4 |
| ; RV32-NEXT: .LBB33_12: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgez a1, .LBB33_5 |
| ; RV32-NEXT: .LBB33_13: # %entry |
| ; RV32-NEXT: lui a3, 524288 |
| ; RV32-NEXT: bltu a2, a0, .LBB33_6 |
| ; RV32-NEXT: .LBB33_14: # %entry |
| ; RV32-NEXT: lui a0, 524288 |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: bne a1, a2, .LBB33_7 |
| ; RV32-NEXT: j .LBB33_8 |
| ; |
| ; RV64-LABEL: stest_f16i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 524288 |
| ; RV64-NEXT: addiw a2, a1, -1 |
| ; RV64-NEXT: blt a0, a2, .LBB33_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: .LBB33_2: # %entry |
| ; RV64-NEXT: blt a1, a0, .LBB33_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: lui a0, 524288 |
| ; RV64-NEXT: .LBB33_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptosi half %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; half -> u32 saturation via llvm.umin.i64 after __gnu_h2f_ieee promotion. |
| define i32 @utesth_f16i32_mm(half %x) { |
| ; RV32-LABEL: utesth_f16i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: call __fixunssfdi@plt |
| ; RV32-NEXT: beqz a1, .LBB34_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: .LBB34_2: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utesth_f16i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.lu.s a0, ft0, rtz |
| ; RV64-NEXT: li a1, -1 |
| ; RV64-NEXT: srli a1, a1, 32 |
| ; RV64-NEXT: bltu a0, a1, .LBB34_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB34_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptoui half %x to i64 |
|   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) |
|   %conv6 = trunc i64 %spec.store.select to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; half -> i32 clamped to [0, 4294967295] with signed smin/smax intrinsics. |
| define i32 @ustest_f16i32_mm(half %x) { |
| ; RV32-LABEL: ustest_f16i32_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: call __fixsfdi@plt |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgez a1, .LBB35_7 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: bnez a1, .LBB35_8 |
| ; RV32-NEXT: .LBB35_2: # %entry |
| ; RV32-NEXT: bgez a1, .LBB35_9 |
| ; RV32-NEXT: .LBB35_3: # %entry |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: blez a1, .LBB35_10 |
| ; RV32-NEXT: .LBB35_4: # %entry |
| ; RV32-NEXT: beqz a1, .LBB35_6 |
| ; RV32-NEXT: .LBB35_5: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: .LBB35_6: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB35_7: # %entry |
| ; RV32-NEXT: li a2, -1 |
| ; RV32-NEXT: beqz a1, .LBB35_2 |
| ; RV32-NEXT: .LBB35_8: # %entry |
| ; RV32-NEXT: mv a0, a2 |
| ; RV32-NEXT: bltz a1, .LBB35_3 |
| ; RV32-NEXT: .LBB35_9: # %entry |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: mv a2, a0 |
| ; RV32-NEXT: bgtz a1, .LBB35_4 |
| ; RV32-NEXT: .LBB35_10: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a1, .LBB35_5 |
| ; RV32-NEXT: j .LBB35_6 |
| ; |
| ; RV64-LABEL: ustest_f16i32_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: li a1, -1 |
| ; RV64-NEXT: srli a1, a1, 32 |
| ; RV64-NEXT: blt a0, a1, .LBB35_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB35_2: # %entry |
| ; RV64-NEXT: bgtz a0, .LBB35_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB35_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
|   %conv = fptosi half %x to i64 |
|   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) |
|   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) |
|   %conv6 = trunc i64 %spec.store.select7 to i32 |
|   ret i32 %conv6 |
| } |
| |
| ; i16 saturate |
| |
| ; double -> i16 signed saturation via i32 smin/smax intrinsics (bounds 32767 |
| ; and -32768). All four configs are checked separately here: RV32IF/RV64IF |
| ; use the __fixdfsi libcall; RV32IFD round-trips the double through the |
| ; stack to reach an FPR, RV64IFD uses fmv.d.x; both then use fcvt.w.d. |
| define i16 @stest_f64i16_mm(double %x) { |
| ; RV32IF-LABEL: stest_f64i16_mm: |
| ; RV32IF: # %bb.0: # %entry |
| ; RV32IF-NEXT: addi sp, sp, -16 |
| ; RV32IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32IF-NEXT: .cfi_offset ra, -4 |
| ; RV32IF-NEXT: call __fixdfsi@plt |
| ; RV32IF-NEXT: lui a1, 8 |
| ; RV32IF-NEXT: addi a1, a1, -1 |
| ; RV32IF-NEXT: blt a0, a1, .LBB36_2 |
| ; RV32IF-NEXT: # %bb.1: # %entry |
| ; RV32IF-NEXT: mv a0, a1 |
| ; RV32IF-NEXT: .LBB36_2: # %entry |
| ; RV32IF-NEXT: lui a1, 1048568 |
| ; RV32IF-NEXT: blt a1, a0, .LBB36_4 |
| ; RV32IF-NEXT: # %bb.3: # %entry |
| ; RV32IF-NEXT: lui a0, 1048568 |
| ; RV32IF-NEXT: .LBB36_4: # %entry |
| ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32IF-NEXT: addi sp, sp, 16 |
| ; RV32IF-NEXT: ret |
| ; |
| ; RV64IF-LABEL: stest_f64i16_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixdfsi@plt |
| ; RV64IF-NEXT: lui a1, 8 |
| ; RV64IF-NEXT: addiw a1, a1, -1 |
| ; RV64IF-NEXT: blt a0, a1, .LBB36_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a1 |
| ; RV64IF-NEXT: .LBB36_2: # %entry |
| ; RV64IF-NEXT: lui a1, 1048568 |
| ; RV64IF-NEXT: blt a1, a0, .LBB36_4 |
| ; RV64IF-NEXT: # %bb.3: # %entry |
| ; RV64IF-NEXT: lui a0, 1048568 |
| ; RV64IF-NEXT: .LBB36_4: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV32IFD-LABEL: stest_f64i16_mm: |
| ; RV32IFD: # %bb.0: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, -16 |
| ; RV32IFD-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IFD-NEXT: sw a0, 8(sp) |
| ; RV32IFD-NEXT: sw a1, 12(sp) |
| ; RV32IFD-NEXT: fld ft0, 8(sp) |
| ; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz |
| ; RV32IFD-NEXT: lui a1, 8 |
| ; RV32IFD-NEXT: addi a1, a1, -1 |
| ; RV32IFD-NEXT: bge a0, a1, .LBB36_3 |
| ; RV32IFD-NEXT: # %bb.1: # %entry |
| ; RV32IFD-NEXT: lui a1, 1048568 |
| ; RV32IFD-NEXT: bge a1, a0, .LBB36_4 |
| ; RV32IFD-NEXT: .LBB36_2: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, 16 |
| ; RV32IFD-NEXT: ret |
| ; RV32IFD-NEXT: .LBB36_3: # %entry |
| ; RV32IFD-NEXT: mv a0, a1 |
| ; RV32IFD-NEXT: lui a1, 1048568 |
| ; RV32IFD-NEXT: blt a1, a0, .LBB36_2 |
| ; RV32IFD-NEXT: .LBB36_4: # %entry |
| ; RV32IFD-NEXT: lui a0, 1048568 |
| ; RV32IFD-NEXT: addi sp, sp, 16 |
| ; RV32IFD-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: stest_f64i16_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.w.d a0, ft0, rtz |
| ; RV64IFD-NEXT: lui a1, 8 |
| ; RV64IFD-NEXT: addiw a1, a1, -1 |
| ; RV64IFD-NEXT: bge a0, a1, .LBB36_3 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: lui a1, 1048568 |
| ; RV64IFD-NEXT: bge a1, a0, .LBB36_4 |
| ; RV64IFD-NEXT: .LBB36_2: # %entry |
| ; RV64IFD-NEXT: ret |
| ; RV64IFD-NEXT: .LBB36_3: # %entry |
| ; RV64IFD-NEXT: mv a0, a1 |
| ; RV64IFD-NEXT: lui a1, 1048568 |
| ; RV64IFD-NEXT: blt a1, a0, .LBB36_2 |
| ; RV64IFD-NEXT: .LBB36_4: # %entry |
| ; RV64IFD-NEXT: lui a0, 1048568 |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptosi double %x to i32 |
|   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) |
|   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) |
|   %conv6 = trunc i32 %spec.store.select7 to i16 |
|   ret i16 %conv6 |
| } |
| |
| ; double -> u16 saturation via llvm.umin.i32 with 65535 (lui 16 - 1). |
| define i16 @utest_f64i16_mm(double %x) { |
| ; RV32IF-LABEL: utest_f64i16_mm: |
| ; RV32IF: # %bb.0: # %entry |
| ; RV32IF-NEXT: addi sp, sp, -16 |
| ; RV32IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32IF-NEXT: .cfi_offset ra, -4 |
| ; RV32IF-NEXT: call __fixunsdfsi@plt |
| ; RV32IF-NEXT: lui a1, 16 |
| ; RV32IF-NEXT: addi a1, a1, -1 |
| ; RV32IF-NEXT: bltu a0, a1, .LBB37_2 |
| ; RV32IF-NEXT: # %bb.1: # %entry |
| ; RV32IF-NEXT: mv a0, a1 |
| ; RV32IF-NEXT: .LBB37_2: # %entry |
| ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32IF-NEXT: addi sp, sp, 16 |
| ; RV32IF-NEXT: ret |
| ; |
| ; RV64IF-LABEL: utest_f64i16_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixunsdfsi@plt |
| ; RV64IF-NEXT: lui a1, 16 |
| ; RV64IF-NEXT: addiw a1, a1, -1 |
| ; RV64IF-NEXT: bltu a0, a1, .LBB37_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a1 |
| ; RV64IF-NEXT: .LBB37_2: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV32IFD-LABEL: utest_f64i16_mm: |
| ; RV32IFD: # %bb.0: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, -16 |
| ; RV32IFD-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IFD-NEXT: sw a0, 8(sp) |
| ; RV32IFD-NEXT: sw a1, 12(sp) |
| ; RV32IFD-NEXT: fld ft0, 8(sp) |
| ; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz |
| ; RV32IFD-NEXT: lui a1, 16 |
| ; RV32IFD-NEXT: addi a1, a1, -1 |
| ; RV32IFD-NEXT: bltu a0, a1, .LBB37_2 |
| ; RV32IFD-NEXT: # %bb.1: # %entry |
| ; RV32IFD-NEXT: mv a0, a1 |
| ; RV32IFD-NEXT: .LBB37_2: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, 16 |
| ; RV32IFD-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: utest_f64i16_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.wu.d a0, ft0, rtz |
| ; RV64IFD-NEXT: lui a1, 16 |
| ; RV64IFD-NEXT: addiw a1, a1, -1 |
| ; RV64IFD-NEXT: bltu a0, a1, .LBB37_2 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: mv a0, a1 |
| ; RV64IFD-NEXT: .LBB37_2: # %entry |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptoui double %x to i32 |
|   %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) |
|   %conv6 = trunc i32 %spec.store.select to i16 |
|   ret i16 %conv6 |
| } |
| |
| ; double -> i16 clamped to [0, 65535] with signed i32 smin/smax intrinsics. |
| define i16 @ustest_f64i16_mm(double %x) { |
| ; RV32IF-LABEL: ustest_f64i16_mm: |
| ; RV32IF: # %bb.0: # %entry |
| ; RV32IF-NEXT: addi sp, sp, -16 |
| ; RV32IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32IF-NEXT: .cfi_offset ra, -4 |
| ; RV32IF-NEXT: call __fixdfsi@plt |
| ; RV32IF-NEXT: lui a1, 16 |
| ; RV32IF-NEXT: addi a1, a1, -1 |
| ; RV32IF-NEXT: blt a0, a1, .LBB38_2 |
| ; RV32IF-NEXT: # %bb.1: # %entry |
| ; RV32IF-NEXT: mv a0, a1 |
| ; RV32IF-NEXT: .LBB38_2: # %entry |
| ; RV32IF-NEXT: bgtz a0, .LBB38_4 |
| ; RV32IF-NEXT: # %bb.3: # %entry |
| ; RV32IF-NEXT: li a0, 0 |
| ; RV32IF-NEXT: .LBB38_4: # %entry |
| ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32IF-NEXT: addi sp, sp, 16 |
| ; RV32IF-NEXT: ret |
| ; |
| ; RV64IF-LABEL: ustest_f64i16_mm: |
| ; RV64IF: # %bb.0: # %entry |
| ; RV64IF-NEXT: addi sp, sp, -16 |
| ; RV64IF-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64IF-NEXT: .cfi_offset ra, -8 |
| ; RV64IF-NEXT: call __fixdfsi@plt |
| ; RV64IF-NEXT: lui a1, 16 |
| ; RV64IF-NEXT: addiw a1, a1, -1 |
| ; RV64IF-NEXT: blt a0, a1, .LBB38_2 |
| ; RV64IF-NEXT: # %bb.1: # %entry |
| ; RV64IF-NEXT: mv a0, a1 |
| ; RV64IF-NEXT: .LBB38_2: # %entry |
| ; RV64IF-NEXT: bgtz a0, .LBB38_4 |
| ; RV64IF-NEXT: # %bb.3: # %entry |
| ; RV64IF-NEXT: li a0, 0 |
| ; RV64IF-NEXT: .LBB38_4: # %entry |
| ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64IF-NEXT: addi sp, sp, 16 |
| ; RV64IF-NEXT: ret |
| ; |
| ; RV32IFD-LABEL: ustest_f64i16_mm: |
| ; RV32IFD: # %bb.0: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, -16 |
| ; RV32IFD-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IFD-NEXT: sw a0, 8(sp) |
| ; RV32IFD-NEXT: sw a1, 12(sp) |
| ; RV32IFD-NEXT: fld ft0, 8(sp) |
| ; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz |
| ; RV32IFD-NEXT: lui a1, 16 |
| ; RV32IFD-NEXT: addi a1, a1, -1 |
| ; RV32IFD-NEXT: bge a0, a1, .LBB38_3 |
| ; RV32IFD-NEXT: # %bb.1: # %entry |
| ; RV32IFD-NEXT: blez a0, .LBB38_4 |
| ; RV32IFD-NEXT: .LBB38_2: # %entry |
| ; RV32IFD-NEXT: addi sp, sp, 16 |
| ; RV32IFD-NEXT: ret |
| ; RV32IFD-NEXT: .LBB38_3: # %entry |
| ; RV32IFD-NEXT: mv a0, a1 |
| ; RV32IFD-NEXT: bgtz a0, .LBB38_2 |
| ; RV32IFD-NEXT: .LBB38_4: # %entry |
| ; RV32IFD-NEXT: li a0, 0 |
| ; RV32IFD-NEXT: addi sp, sp, 16 |
| ; RV32IFD-NEXT: ret |
| ; |
| ; RV64IFD-LABEL: ustest_f64i16_mm: |
| ; RV64IFD: # %bb.0: # %entry |
| ; RV64IFD-NEXT: fmv.d.x ft0, a0 |
| ; RV64IFD-NEXT: fcvt.w.d a0, ft0, rtz |
| ; RV64IFD-NEXT: lui a1, 16 |
| ; RV64IFD-NEXT: addiw a1, a1, -1 |
| ; RV64IFD-NEXT: bge a0, a1, .LBB38_3 |
| ; RV64IFD-NEXT: # %bb.1: # %entry |
| ; RV64IFD-NEXT: blez a0, .LBB38_4 |
| ; RV64IFD-NEXT: .LBB38_2: # %entry |
| ; RV64IFD-NEXT: ret |
| ; RV64IFD-NEXT: .LBB38_3: # %entry |
| ; RV64IFD-NEXT: mv a0, a1 |
| ; RV64IFD-NEXT: bgtz a0, .LBB38_2 |
| ; RV64IFD-NEXT: .LBB38_4: # %entry |
| ; RV64IFD-NEXT: li a0, 0 |
| ; RV64IFD-NEXT: ret |
| entry: |
|   %conv = fptosi double %x to i32 |
|   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) |
|   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) |
|   %conv6 = trunc i32 %spec.store.select7 to i16 |
|   ret i16 %conv6 |
| } |
| |
| ; Signed saturation: clamp fptosi(float -> i32) to the i16 range |
| ; [-32768, 32767] via smin/smax, then truncate to i16. |
| define i16 @stest_f32i16_mm(float %x) { |
| ; RV32-LABEL: stest_f32i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 8 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bge a0, a1, .LBB39_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: lui a1, 1048568 |
| ; RV32-NEXT: bge a1, a0, .LBB39_4 |
| ; RV32-NEXT: .LBB39_2: # %entry |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB39_3: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: lui a1, 1048568 |
| ; RV32-NEXT: blt a1, a0, .LBB39_2 |
| ; RV32-NEXT: .LBB39_4: # %entry |
| ; RV32-NEXT: lui a0, 1048568 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f32i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 8 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bge a0, a1, .LBB39_3 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: lui a1, 1048568 |
| ; RV64-NEXT: bge a1, a0, .LBB39_4 |
| ; RV64-NEXT: .LBB39_2: # %entry |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB39_3: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: lui a1, 1048568 |
| ; RV64-NEXT: blt a1, a0, .LBB39_2 |
| ; RV64-NEXT: .LBB39_4: # %entry |
| ; RV64-NEXT: lui a0, 1048568 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi float %x to i32 |
| %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) |
| %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| ; Unsigned saturation: clamp fptoui(float -> i32) to the u16 max 65535 |
| ; via umin, then truncate to i16. |
| define i16 @utest_f32i16_mm(float %x) { |
| ; RV32-LABEL: utest_f32i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.wu.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bltu a0, a1, .LBB40_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB40_2: # %entry |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f32i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.wu.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bltu a0, a1, .LBB40_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB40_2: # %entry |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui float %x to i32 |
| %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) |
| %conv6 = trunc i32 %spec.store.select to i16 |
| ret i16 %conv6 |
| } |
| |
| ; Signed-input unsigned saturation: clamp fptosi(float -> i32) to |
| ; [0, 65535] via smin/smax, then truncate to i16. |
| define i16 @ustest_f32i16_mm(float %x) { |
| ; RV32-LABEL: ustest_f32i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bge a0, a1, .LBB41_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: blez a0, .LBB41_4 |
| ; RV32-NEXT: .LBB41_2: # %entry |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB41_3: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: bgtz a0, .LBB41_2 |
| ; RV32-NEXT: .LBB41_4: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f32i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bge a0, a1, .LBB41_3 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: blez a0, .LBB41_4 |
| ; RV64-NEXT: .LBB41_2: # %entry |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB41_3: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: bgtz a0, .LBB41_2 |
| ; RV64-NEXT: .LBB41_4: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi float %x to i32 |
| %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) |
| %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| ; Signed saturation from half: half is first widened via the libcall |
| ; __gnu_h2f_ieee (no Zfh in the RUN lines), then clamped to |
| ; [-32768, 32767] and truncated to i16. |
| define i16 @stest_f16i16_mm(half %x) { |
| ; RV32-LABEL: stest_f16i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 8 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: blt a0, a1, .LBB42_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB42_2: # %entry |
| ; RV32-NEXT: lui a1, 1048568 |
| ; RV32-NEXT: blt a1, a0, .LBB42_4 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: lui a0, 1048568 |
| ; RV32-NEXT: .LBB42_4: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: stest_f16i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 8 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: blt a0, a1, .LBB42_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB42_2: # %entry |
| ; RV64-NEXT: lui a1, 1048568 |
| ; RV64-NEXT: blt a1, a0, .LBB42_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: lui a0, 1048568 |
| ; RV64-NEXT: .LBB42_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi half %x to i32 |
| %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) |
| %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| ; Unsigned saturation from half: widen via __gnu_h2f_ieee, fptoui, |
| ; clamp to 65535 via umin, truncate to i16. |
| define i16 @utesth_f16i16_mm(half %x) { |
| ; RV32-LABEL: utesth_f16i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.wu.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: bltu a0, a1, .LBB43_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB43_2: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utesth_f16i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.lu.s a0, ft0, rtz |
| ; RV64-NEXT: sext.w a0, a0 |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: bltu a0, a1, .LBB43_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB43_2: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui half %x to i32 |
| %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) |
| %conv6 = trunc i32 %spec.store.select to i16 |
| ret i16 %conv6 |
| } |
| |
| ; Signed-input unsigned saturation from half: widen via __gnu_h2f_ieee, |
| ; fptosi, clamp to [0, 65535] via smin/smax, truncate to i16. |
| define i16 @ustest_f16i16_mm(half %x) { |
| ; RV32-LABEL: ustest_f16i16_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -16 |
| ; RV32-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: fmv.w.x ft0, a0 |
| ; RV32-NEXT: fcvt.w.s a0, ft0, rtz |
| ; RV32-NEXT: lui a1, 16 |
| ; RV32-NEXT: addi a1, a1, -1 |
| ; RV32-NEXT: blt a0, a1, .LBB44_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: .LBB44_2: # %entry |
| ; RV32-NEXT: bgtz a0, .LBB44_4 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: li a0, 0 |
| ; RV32-NEXT: .LBB44_4: # %entry |
| ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 16 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: ustest_f16i16_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: fmv.w.x ft0, a0 |
| ; RV64-NEXT: fcvt.l.s a0, ft0, rtz |
| ; RV64-NEXT: lui a1, 16 |
| ; RV64-NEXT: addiw a1, a1, -1 |
| ; RV64-NEXT: blt a0, a1, .LBB44_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB44_2: # %entry |
| ; RV64-NEXT: bgtz a0, .LBB44_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: .LBB44_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptosi half %x to i32 |
| %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) |
| %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) |
| %conv6 = trunc i32 %spec.store.select7 to i16 |
| ret i16 %conv6 |
| } |
| |
| ; i64 saturate |
| |
| ; Signed saturation to i64: fptosi double -> i128 (via the __fixdfti |
| ; libcall), clamp to [INT64_MIN, INT64_MAX] via i128 smin/smax, then |
| ; truncate to i64. RV32 returns the i128 indirectly through sp+8. |
| define i64 @stest_f64i64_mm(double %x) { |
| ; RV32-LABEL: stest_f64i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixdfti@plt |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: lw a3, 20(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltz a3, .LBB45_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a4, -1 |
| ; RV32-NEXT: .LBB45_2: # %entry |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a2, a7, -1 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bgeu a1, a2, .LBB45_19 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: bne a1, a2, .LBB45_20 |
| ; RV32-NEXT: .LBB45_4: # %entry |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: bnez a0, .LBB45_21 |
| ; RV32-NEXT: .LBB45_5: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB45_22 |
| ; RV32-NEXT: .LBB45_6: # %entry |
| ; RV32-NEXT: bgeu a1, a2, .LBB45_23 |
| ; RV32-NEXT: .LBB45_7: # %entry |
| ; RV32-NEXT: bnez a0, .LBB45_24 |
| ; RV32-NEXT: .LBB45_8: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a3, .LBB45_25 |
| ; RV32-NEXT: .LBB45_9: # %entry |
| ; RV32-NEXT: bgez a3, .LBB45_26 |
| ; RV32-NEXT: .LBB45_10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bgeu a7, a1, .LBB45_27 |
| ; RV32-NEXT: .LBB45_11: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB45_28 |
| ; RV32-NEXT: .LBB45_12: # %entry |
| ; RV32-NEXT: bltz a3, .LBB45_29 |
| ; RV32-NEXT: .LBB45_13: # %entry |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: bne a2, a6, .LBB45_30 |
| ; RV32-NEXT: .LBB45_14: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB45_31 |
| ; RV32-NEXT: .LBB45_15: # %entry |
| ; RV32-NEXT: bgeu a7, a1, .LBB45_32 |
| ; RV32-NEXT: .LBB45_16: # %entry |
| ; RV32-NEXT: beq a2, a6, .LBB45_18 |
| ; RV32-NEXT: .LBB45_17: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: .LBB45_18: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB45_19: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: beq a1, a2, .LBB45_4 |
| ; RV32-NEXT: .LBB45_20: # %entry |
| ; RV32-NEXT: mv a5, a0 |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: beqz a0, .LBB45_5 |
| ; RV32-NEXT: .LBB45_21: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB45_6 |
| ; RV32-NEXT: .LBB45_22: # %entry |
| ; RV32-NEXT: mv a4, a2 |
| ; RV32-NEXT: bltu a1, a2, .LBB45_7 |
| ; RV32-NEXT: .LBB45_23: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: beqz a0, .LBB45_8 |
| ; RV32-NEXT: .LBB45_24: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: beqz a3, .LBB45_9 |
| ; RV32-NEXT: .LBB45_25: # %entry |
| ; RV32-NEXT: srai a0, a3, 31 |
| ; RV32-NEXT: and a2, a0, t0 |
| ; RV32-NEXT: bltz a3, .LBB45_10 |
| ; RV32-NEXT: .LBB45_26: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltu a7, a1, .LBB45_11 |
| ; RV32-NEXT: .LBB45_27: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: beq a1, a7, .LBB45_12 |
| ; RV32-NEXT: .LBB45_28: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bgez a3, .LBB45_13 |
| ; RV32-NEXT: .LBB45_29: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: beq a2, a6, .LBB45_14 |
| ; RV32-NEXT: .LBB45_30: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB45_15 |
| ; RV32-NEXT: .LBB45_31: # %entry |
| ; RV32-NEXT: lui a4, 524288 |
| ; RV32-NEXT: bltu a7, a1, .LBB45_16 |
| ; RV32-NEXT: .LBB45_32: # %entry |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: bne a2, a6, .LBB45_17 |
| ; RV32-NEXT: j .LBB45_18 |
| ; |
| ; RV64-LABEL: stest_f64i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixdfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a4, a2, 1 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bgez a1, .LBB45_10 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: bgeu a0, a4, .LBB45_11 |
| ; RV64-NEXT: .LBB45_2: # %entry |
| ; RV64-NEXT: bnez a1, .LBB45_12 |
| ; RV64-NEXT: .LBB45_3: # %entry |
| ; RV64-NEXT: bltz a1, .LBB45_5 |
| ; RV64-NEXT: .LBB45_4: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: .LBB45_5: # %entry |
| ; RV64-NEXT: slli a4, a2, 63 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bltz a1, .LBB45_13 |
| ; RV64-NEXT: # %bb.6: # %entry |
| ; RV64-NEXT: bgeu a4, a0, .LBB45_14 |
| ; RV64-NEXT: .LBB45_7: # %entry |
| ; RV64-NEXT: beq a1, a2, .LBB45_9 |
| ; RV64-NEXT: .LBB45_8: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB45_9: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB45_10: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a0, a4, .LBB45_2 |
| ; RV64-NEXT: .LBB45_11: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: beqz a1, .LBB45_3 |
| ; RV64-NEXT: .LBB45_12: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: bgez a1, .LBB45_4 |
| ; RV64-NEXT: j .LBB45_5 |
| ; RV64-NEXT: .LBB45_13: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a4, a0, .LBB45_7 |
| ; RV64-NEXT: .LBB45_14: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: bne a1, a2, .LBB45_8 |
| ; RV64-NEXT: j .LBB45_9 |
| entry: |
| %conv = fptosi double %x to i128 |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Unsigned saturation to i64: fptoui double -> i128 (via __fixunsdfti), |
| ; umin against the limit, then truncate to i64. |
| define i64 @utest_f64i64_mm(double %x) { |
| ; RV32-LABEL: utest_f64i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunsdfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB46_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: beq a2, a1, .LBB46_4 |
| ; RV32-NEXT: .LBB46_2: |
| ; RV32-NEXT: lw a4, 8(sp) |
| ; RV32-NEXT: j .LBB46_5 |
| ; RV32-NEXT: .LBB46_3: |
| ; RV32-NEXT: seqz a2, a3 |
| ; RV32-NEXT: bne a2, a1, .LBB46_2 |
| ; RV32-NEXT: .LBB46_4: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: .LBB46_5: # %entry |
| ; RV32-NEXT: xori a3, a3, 1 |
| ; RV32-NEXT: or a3, a3, a0 |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: beq a3, a1, .LBB46_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: .LBB46_7: # %entry |
| ; RV32-NEXT: bne a2, a1, .LBB46_9 |
| ; RV32-NEXT: # %bb.8: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: bne a3, a1, .LBB46_10 |
| ; RV32-NEXT: j .LBB46_11 |
| ; RV32-NEXT: .LBB46_9: |
| ; RV32-NEXT: lw a2, 12(sp) |
| ; RV32-NEXT: beq a3, a1, .LBB46_11 |
| ; RV32-NEXT: .LBB46_10: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: .LBB46_11: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f64i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixunsdfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beqz a1, .LBB46_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: .LBB46_2: # %entry |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: beq a1, a3, .LBB46_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: .LBB46_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui double %x to i128 |
| ; NOTE(review): the umin bound below is 18446744073709551616 = 2^64, |
| ; one above UINT64_MAX (2^64-1). The CHECK lines were generated from |
| ; this exact IR, so do not "fix" it without regenerating the checks — |
| ; confirm against the in-tree test whether 2^64 is intentional. |
| %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Signed-input unsigned saturation to i64: fptosi double -> i128 (via |
| ; __fixdfti), smin against the upper limit, smax against 0, then |
| ; truncate to i64. |
| define i64 @ustest_f64i64_mm(double %x) { |
| ; RV32-LABEL: ustest_f64i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixdfti@plt |
| ; RV32-NEXT: lw a0, 16(sp) |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: li a1, 1 |
| ; RV32-NEXT: mv a4, a0 |
| ; RV32-NEXT: bgez a2, .LBB47_5 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgeu a0, a1, .LBB47_6 |
| ; RV32-NEXT: .LBB47_2: # %entry |
| ; RV32-NEXT: beqz a2, .LBB47_7 |
| ; RV32-NEXT: .LBB47_3: # %entry |
| ; RV32-NEXT: slti a1, a2, 0 |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: beqz a1, .LBB47_8 |
| ; RV32-NEXT: .LBB47_4: |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: j .LBB47_9 |
| ; RV32-NEXT: .LBB47_5: # %entry |
| ; RV32-NEXT: li a4, 1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltu a0, a1, .LBB47_2 |
| ; RV32-NEXT: .LBB47_6: # %entry |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB47_3 |
| ; RV32-NEXT: .LBB47_7: |
| ; RV32-NEXT: seqz a1, a0 |
| ; RV32-NEXT: bnez a1, .LBB47_4 |
| ; RV32-NEXT: .LBB47_8: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: .LBB47_9: # %entry |
| ; RV32-NEXT: xori a0, a0, 1 |
| ; RV32-NEXT: or a0, a0, a2 |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: beqz a0, .LBB47_11 |
| ; RV32-NEXT: # %bb.10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: .LBB47_11: # %entry |
| ; RV32-NEXT: bnez a1, .LBB47_13 |
| ; RV32-NEXT: # %bb.12: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: bnez a0, .LBB47_14 |
| ; RV32-NEXT: j .LBB47_15 |
| ; RV32-NEXT: .LBB47_13: |
| ; RV32-NEXT: lw a5, 12(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB47_15 |
| ; RV32-NEXT: .LBB47_14: # %entry |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: .LBB47_15: # %entry |
| ; RV32-NEXT: bgez a2, .LBB47_20 |
| ; RV32-NEXT: # %bb.16: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: beqz a1, .LBB47_21 |
| ; RV32-NEXT: .LBB47_17: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bnez a1, .LBB47_22 |
| ; RV32-NEXT: .LBB47_18: # %entry |
| ; RV32-NEXT: beqz a2, .LBB47_23 |
| ; RV32-NEXT: .LBB47_19: # %entry |
| ; RV32-NEXT: sgtz a5, a2 |
| ; RV32-NEXT: beqz a5, .LBB47_24 |
| ; RV32-NEXT: j .LBB47_25 |
| ; RV32-NEXT: .LBB47_20: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: bnez a1, .LBB47_17 |
| ; RV32-NEXT: .LBB47_21: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB47_18 |
| ; RV32-NEXT: .LBB47_22: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bnez a2, .LBB47_19 |
| ; RV32-NEXT: .LBB47_23: |
| ; RV32-NEXT: snez a5, a3 |
| ; RV32-NEXT: bnez a5, .LBB47_25 |
| ; RV32-NEXT: .LBB47_24: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: .LBB47_25: # %entry |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB47_30 |
| ; RV32-NEXT: # %bb.26: # %entry |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: beqz a5, .LBB47_31 |
| ; RV32-NEXT: .LBB47_27: # %entry |
| ; RV32-NEXT: beqz a2, .LBB47_29 |
| ; RV32-NEXT: .LBB47_28: # %entry |
| ; RV32-NEXT: mv a1, a3 |
| ; RV32-NEXT: .LBB47_29: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB47_30: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: bnez a5, .LBB47_27 |
| ; RV32-NEXT: .LBB47_31: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: bnez a2, .LBB47_28 |
| ; RV32-NEXT: j .LBB47_29 |
| ; |
| ; RV64-LABEL: ustest_f64i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixdfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a4, 1 |
| ; RV64-NEXT: mv a3, a1 |
| ; RV64-NEXT: bgtz a1, .LBB47_6 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: bne a1, a4, .LBB47_7 |
| ; RV64-NEXT: .LBB47_2: # %entry |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: blez a3, .LBB47_8 |
| ; RV64-NEXT: .LBB47_3: # %entry |
| ; RV64-NEXT: beqz a3, .LBB47_5 |
| ; RV64-NEXT: .LBB47_4: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB47_5: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB47_6: # %entry |
| ; RV64-NEXT: li a2, 0 |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beq a1, a4, .LBB47_2 |
| ; RV64-NEXT: .LBB47_7: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: bgtz a3, .LBB47_3 |
| ; RV64-NEXT: .LBB47_8: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: bnez a3, .LBB47_4 |
| ; RV64-NEXT: j .LBB47_5 |
| entry: |
| %conv = fptosi double %x to i128 |
| ; NOTE(review): the smin bound below is 18446744073709551616 = 2^64, |
| ; one above UINT64_MAX (2^64-1). The CHECK lines were generated from |
| ; this exact IR, so do not "fix" it without regenerating the checks — |
| ; confirm against the in-tree test whether 2^64 is intentional. |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Signed saturation to i64 from float: fptosi float -> i128 (via the |
| ; __fixsfti libcall), clamp to [INT64_MIN, INT64_MAX] via i128 |
| ; smin/smax, then truncate to i64. Same shape as stest_f64i64_mm. |
| define i64 @stest_f32i64_mm(float %x) { |
| ; RV32-LABEL: stest_f32i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: lw a3, 20(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltz a3, .LBB48_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a4, -1 |
| ; RV32-NEXT: .LBB48_2: # %entry |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a2, a7, -1 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bgeu a1, a2, .LBB48_19 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: bne a1, a2, .LBB48_20 |
| ; RV32-NEXT: .LBB48_4: # %entry |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: bnez a0, .LBB48_21 |
| ; RV32-NEXT: .LBB48_5: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB48_22 |
| ; RV32-NEXT: .LBB48_6: # %entry |
| ; RV32-NEXT: bgeu a1, a2, .LBB48_23 |
| ; RV32-NEXT: .LBB48_7: # %entry |
| ; RV32-NEXT: bnez a0, .LBB48_24 |
| ; RV32-NEXT: .LBB48_8: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a3, .LBB48_25 |
| ; RV32-NEXT: .LBB48_9: # %entry |
| ; RV32-NEXT: bgez a3, .LBB48_26 |
| ; RV32-NEXT: .LBB48_10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bgeu a7, a1, .LBB48_27 |
| ; RV32-NEXT: .LBB48_11: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB48_28 |
| ; RV32-NEXT: .LBB48_12: # %entry |
| ; RV32-NEXT: bltz a3, .LBB48_29 |
| ; RV32-NEXT: .LBB48_13: # %entry |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: bne a2, a6, .LBB48_30 |
| ; RV32-NEXT: .LBB48_14: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB48_31 |
| ; RV32-NEXT: .LBB48_15: # %entry |
| ; RV32-NEXT: bgeu a7, a1, .LBB48_32 |
| ; RV32-NEXT: .LBB48_16: # %entry |
| ; RV32-NEXT: beq a2, a6, .LBB48_18 |
| ; RV32-NEXT: .LBB48_17: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: .LBB48_18: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB48_19: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: beq a1, a2, .LBB48_4 |
| ; RV32-NEXT: .LBB48_20: # %entry |
| ; RV32-NEXT: mv a5, a0 |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: beqz a0, .LBB48_5 |
| ; RV32-NEXT: .LBB48_21: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB48_6 |
| ; RV32-NEXT: .LBB48_22: # %entry |
| ; RV32-NEXT: mv a4, a2 |
| ; RV32-NEXT: bltu a1, a2, .LBB48_7 |
| ; RV32-NEXT: .LBB48_23: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: beqz a0, .LBB48_8 |
| ; RV32-NEXT: .LBB48_24: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: beqz a3, .LBB48_9 |
| ; RV32-NEXT: .LBB48_25: # %entry |
| ; RV32-NEXT: srai a0, a3, 31 |
| ; RV32-NEXT: and a2, a0, t0 |
| ; RV32-NEXT: bltz a3, .LBB48_10 |
| ; RV32-NEXT: .LBB48_26: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltu a7, a1, .LBB48_11 |
| ; RV32-NEXT: .LBB48_27: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: beq a1, a7, .LBB48_12 |
| ; RV32-NEXT: .LBB48_28: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bgez a3, .LBB48_13 |
| ; RV32-NEXT: .LBB48_29: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: beq a2, a6, .LBB48_14 |
| ; RV32-NEXT: .LBB48_30: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB48_15 |
| ; RV32-NEXT: .LBB48_31: # %entry |
| ; RV32-NEXT: lui a4, 524288 |
| ; RV32-NEXT: bltu a7, a1, .LBB48_16 |
| ; RV32-NEXT: .LBB48_32: # %entry |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: bne a2, a6, .LBB48_17 |
| ; RV32-NEXT: j .LBB48_18 |
| ; |
| ; RV64-LABEL: stest_f32i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a4, a2, 1 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bgez a1, .LBB48_10 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: bgeu a0, a4, .LBB48_11 |
| ; RV64-NEXT: .LBB48_2: # %entry |
| ; RV64-NEXT: bnez a1, .LBB48_12 |
| ; RV64-NEXT: .LBB48_3: # %entry |
| ; RV64-NEXT: bltz a1, .LBB48_5 |
| ; RV64-NEXT: .LBB48_4: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: .LBB48_5: # %entry |
| ; RV64-NEXT: slli a4, a2, 63 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bltz a1, .LBB48_13 |
| ; RV64-NEXT: # %bb.6: # %entry |
| ; RV64-NEXT: bgeu a4, a0, .LBB48_14 |
| ; RV64-NEXT: .LBB48_7: # %entry |
| ; RV64-NEXT: beq a1, a2, .LBB48_9 |
| ; RV64-NEXT: .LBB48_8: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB48_9: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB48_10: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a0, a4, .LBB48_2 |
| ; RV64-NEXT: .LBB48_11: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: beqz a1, .LBB48_3 |
| ; RV64-NEXT: .LBB48_12: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: bgez a1, .LBB48_4 |
| ; RV64-NEXT: j .LBB48_5 |
| ; RV64-NEXT: .LBB48_13: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a4, a0, .LBB48_7 |
| ; RV64-NEXT: .LBB48_14: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: bne a1, a2, .LBB48_8 |
| ; RV64-NEXT: j .LBB48_9 |
| entry: |
| %conv = fptosi float %x to i128 |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Unsigned saturation to i64 from float: fptoui float -> i128 (via |
| ; __fixunssfti), umin against the limit, then truncate to i64. |
| define i64 @utest_f32i64_mm(float %x) { |
| ; RV32-LABEL: utest_f32i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunssfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB49_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: beq a2, a1, .LBB49_4 |
| ; RV32-NEXT: .LBB49_2: |
| ; RV32-NEXT: lw a4, 8(sp) |
| ; RV32-NEXT: j .LBB49_5 |
| ; RV32-NEXT: .LBB49_3: |
| ; RV32-NEXT: seqz a2, a3 |
| ; RV32-NEXT: bne a2, a1, .LBB49_2 |
| ; RV32-NEXT: .LBB49_4: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: .LBB49_5: # %entry |
| ; RV32-NEXT: xori a3, a3, 1 |
| ; RV32-NEXT: or a3, a3, a0 |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: beq a3, a1, .LBB49_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: .LBB49_7: # %entry |
| ; RV32-NEXT: bne a2, a1, .LBB49_9 |
| ; RV32-NEXT: # %bb.8: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: bne a3, a1, .LBB49_10 |
| ; RV32-NEXT: j .LBB49_11 |
| ; RV32-NEXT: .LBB49_9: |
| ; RV32-NEXT: lw a2, 12(sp) |
| ; RV32-NEXT: beq a3, a1, .LBB49_11 |
| ; RV32-NEXT: .LBB49_10: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: .LBB49_11: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utest_f32i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixunssfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beqz a1, .LBB49_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: .LBB49_2: # %entry |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: beq a1, a3, .LBB49_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: .LBB49_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui float %x to i128 |
| ; NOTE(review): the umin bound below is 18446744073709551616 = 2^64, |
| ; one above UINT64_MAX (2^64-1). The CHECK lines were generated from |
| ; this exact IR, so do not "fix" it without regenerating the checks — |
| ; confirm against the in-tree test whether 2^64 is intentional. |
| %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Unsigned-style saturation of a float to i64: convert via fptosi to i128, |
| ; clamp with llvm.smin then llvm.smax (the "_mm" min/max-intrinsic pattern), |
| ; and truncate the result.  The checks below are autogenerated — do not edit |
| ; them by hand; rerun utils/update_llc_test_checks.py instead. |
| define i64 @ustest_f32i64_mm(float %x) { |
| ; RV32-LABEL: ustest_f32i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a0, 16(sp) |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: li a1, 1 |
| ; RV32-NEXT: mv a4, a0 |
| ; RV32-NEXT: bgez a2, .LBB50_5 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgeu a0, a1, .LBB50_6 |
| ; RV32-NEXT: .LBB50_2: # %entry |
| ; RV32-NEXT: beqz a2, .LBB50_7 |
| ; RV32-NEXT: .LBB50_3: # %entry |
| ; RV32-NEXT: slti a1, a2, 0 |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: beqz a1, .LBB50_8 |
| ; RV32-NEXT: .LBB50_4: |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: j .LBB50_9 |
| ; RV32-NEXT: .LBB50_5: # %entry |
| ; RV32-NEXT: li a4, 1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltu a0, a1, .LBB50_2 |
| ; RV32-NEXT: .LBB50_6: # %entry |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB50_3 |
| ; RV32-NEXT: .LBB50_7: |
| ; RV32-NEXT: seqz a1, a0 |
| ; RV32-NEXT: bnez a1, .LBB50_4 |
| ; RV32-NEXT: .LBB50_8: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: .LBB50_9: # %entry |
| ; RV32-NEXT: xori a0, a0, 1 |
| ; RV32-NEXT: or a0, a0, a2 |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: beqz a0, .LBB50_11 |
| ; RV32-NEXT: # %bb.10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: .LBB50_11: # %entry |
| ; RV32-NEXT: bnez a1, .LBB50_13 |
| ; RV32-NEXT: # %bb.12: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: bnez a0, .LBB50_14 |
| ; RV32-NEXT: j .LBB50_15 |
| ; RV32-NEXT: .LBB50_13: |
| ; RV32-NEXT: lw a5, 12(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB50_15 |
| ; RV32-NEXT: .LBB50_14: # %entry |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: .LBB50_15: # %entry |
| ; RV32-NEXT: bgez a2, .LBB50_20 |
| ; RV32-NEXT: # %bb.16: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: beqz a1, .LBB50_21 |
| ; RV32-NEXT: .LBB50_17: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bnez a1, .LBB50_22 |
| ; RV32-NEXT: .LBB50_18: # %entry |
| ; RV32-NEXT: beqz a2, .LBB50_23 |
| ; RV32-NEXT: .LBB50_19: # %entry |
| ; RV32-NEXT: sgtz a5, a2 |
| ; RV32-NEXT: beqz a5, .LBB50_24 |
| ; RV32-NEXT: j .LBB50_25 |
| ; RV32-NEXT: .LBB50_20: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: bnez a1, .LBB50_17 |
| ; RV32-NEXT: .LBB50_21: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB50_18 |
| ; RV32-NEXT: .LBB50_22: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bnez a2, .LBB50_19 |
| ; RV32-NEXT: .LBB50_23: |
| ; RV32-NEXT: snez a5, a3 |
| ; RV32-NEXT: bnez a5, .LBB50_25 |
| ; RV32-NEXT: .LBB50_24: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: .LBB50_25: # %entry |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB50_30 |
| ; RV32-NEXT: # %bb.26: # %entry |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: beqz a5, .LBB50_31 |
| ; RV32-NEXT: .LBB50_27: # %entry |
| ; RV32-NEXT: beqz a2, .LBB50_29 |
| ; RV32-NEXT: .LBB50_28: # %entry |
| ; RV32-NEXT: mv a1, a3 |
| ; RV32-NEXT: .LBB50_29: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB50_30: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: bnez a5, .LBB50_27 |
| ; RV32-NEXT: .LBB50_31: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: bnez a2, .LBB50_28 |
| ; RV32-NEXT: j .LBB50_29 |
| ; |
| ; RV64-LABEL: ustest_f32i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a4, 1 |
| ; RV64-NEXT: mv a3, a1 |
| ; RV64-NEXT: bgtz a1, .LBB50_6 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: bne a1, a4, .LBB50_7 |
| ; RV64-NEXT: .LBB50_2: # %entry |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: blez a3, .LBB50_8 |
| ; RV64-NEXT: .LBB50_3: # %entry |
| ; RV64-NEXT: beqz a3, .LBB50_5 |
| ; RV64-NEXT: .LBB50_4: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB50_5: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB50_6: # %entry |
| ; RV64-NEXT: li a2, 0 |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beq a1, a4, .LBB50_2 |
| ; RV64-NEXT: .LBB50_7: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: bgtz a3, .LBB50_3 |
| ; RV64-NEXT: .LBB50_8: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: bnez a3, .LBB50_4 |
| ; RV64-NEXT: j .LBB50_5 |
| entry: |
| %conv = fptosi float %x to i128 ; widen first so both clamps are exact |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) ; upper bound is 2^64 (one past u64 max) — NOTE(review): intentional test input; do not "fix" without regenerating checks |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) ; lower clamp at zero |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Signed saturation of a half to i64: convert via fptosi to i128, clamp to |
| ; [INT64_MIN, INT64_MAX] with llvm.smin/llvm.smax, then truncate.  Half is |
| ; legalized through __gnu_h2f_ieee before the i128 conversion libcall. |
| ; The checks below are autogenerated by utils/update_llc_test_checks.py. |
| define i64 @stest_f16i64_mm(half %x) { |
| ; RV32-LABEL: stest_f16i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: lw a3, 20(sp) |
| ; RV32-NEXT: lw a1, 12(sp) |
| ; RV32-NEXT: li a6, -1 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltz a3, .LBB51_2 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: li a4, -1 |
| ; RV32-NEXT: .LBB51_2: # %entry |
| ; RV32-NEXT: lui a7, 524288 |
| ; RV32-NEXT: addi a2, a7, -1 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bgeu a1, a2, .LBB51_19 |
| ; RV32-NEXT: # %bb.3: # %entry |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: bne a1, a2, .LBB51_20 |
| ; RV32-NEXT: .LBB51_4: # %entry |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: bnez a0, .LBB51_21 |
| ; RV32-NEXT: .LBB51_5: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB51_22 |
| ; RV32-NEXT: .LBB51_6: # %entry |
| ; RV32-NEXT: bgeu a1, a2, .LBB51_23 |
| ; RV32-NEXT: .LBB51_7: # %entry |
| ; RV32-NEXT: bnez a0, .LBB51_24 |
| ; RV32-NEXT: .LBB51_8: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: bnez a3, .LBB51_25 |
| ; RV32-NEXT: .LBB51_9: # %entry |
| ; RV32-NEXT: bgez a3, .LBB51_26 |
| ; RV32-NEXT: .LBB51_10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bgeu a7, a1, .LBB51_27 |
| ; RV32-NEXT: .LBB51_11: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bne a1, a7, .LBB51_28 |
| ; RV32-NEXT: .LBB51_12: # %entry |
| ; RV32-NEXT: bltz a3, .LBB51_29 |
| ; RV32-NEXT: .LBB51_13: # %entry |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: bne a2, a6, .LBB51_30 |
| ; RV32-NEXT: .LBB51_14: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB51_31 |
| ; RV32-NEXT: .LBB51_15: # %entry |
| ; RV32-NEXT: bgeu a7, a1, .LBB51_32 |
| ; RV32-NEXT: .LBB51_16: # %entry |
| ; RV32-NEXT: beq a2, a6, .LBB51_18 |
| ; RV32-NEXT: .LBB51_17: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: .LBB51_18: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB51_19: # %entry |
| ; RV32-NEXT: li a0, -1 |
| ; RV32-NEXT: lw t0, 16(sp) |
| ; RV32-NEXT: beq a1, a2, .LBB51_4 |
| ; RV32-NEXT: .LBB51_20: # %entry |
| ; RV32-NEXT: mv a5, a0 |
| ; RV32-NEXT: or a0, t0, a3 |
| ; RV32-NEXT: beqz a0, .LBB51_5 |
| ; RV32-NEXT: .LBB51_21: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bltz a3, .LBB51_6 |
| ; RV32-NEXT: .LBB51_22: # %entry |
| ; RV32-NEXT: mv a4, a2 |
| ; RV32-NEXT: bltu a1, a2, .LBB51_7 |
| ; RV32-NEXT: .LBB51_23: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: beqz a0, .LBB51_8 |
| ; RV32-NEXT: .LBB51_24: # %entry |
| ; RV32-NEXT: mv a1, a4 |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: beqz a3, .LBB51_9 |
| ; RV32-NEXT: .LBB51_25: # %entry |
| ; RV32-NEXT: srai a0, a3, 31 |
| ; RV32-NEXT: and a2, a0, t0 |
| ; RV32-NEXT: bltz a3, .LBB51_10 |
| ; RV32-NEXT: .LBB51_26: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: bltu a7, a1, .LBB51_11 |
| ; RV32-NEXT: .LBB51_27: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: beq a1, a7, .LBB51_12 |
| ; RV32-NEXT: .LBB51_28: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bgez a3, .LBB51_13 |
| ; RV32-NEXT: .LBB51_29: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: and a2, a2, a3 |
| ; RV32-NEXT: beq a2, a6, .LBB51_14 |
| ; RV32-NEXT: .LBB51_30: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: bgez a3, .LBB51_15 |
| ; RV32-NEXT: .LBB51_31: # %entry |
| ; RV32-NEXT: lui a4, 524288 |
| ; RV32-NEXT: bltu a7, a1, .LBB51_16 |
| ; RV32-NEXT: .LBB51_32: # %entry |
| ; RV32-NEXT: lui a1, 524288 |
| ; RV32-NEXT: bne a2, a6, .LBB51_17 |
| ; RV32-NEXT: j .LBB51_18 |
| ; |
| ; RV64-LABEL: stest_f16i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: li a2, -1 |
| ; RV64-NEXT: srli a4, a2, 1 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bgez a1, .LBB51_10 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: bgeu a0, a4, .LBB51_11 |
| ; RV64-NEXT: .LBB51_2: # %entry |
| ; RV64-NEXT: bnez a1, .LBB51_12 |
| ; RV64-NEXT: .LBB51_3: # %entry |
| ; RV64-NEXT: bltz a1, .LBB51_5 |
| ; RV64-NEXT: .LBB51_4: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: .LBB51_5: # %entry |
| ; RV64-NEXT: slli a4, a2, 63 |
| ; RV64-NEXT: mv a3, a0 |
| ; RV64-NEXT: bltz a1, .LBB51_13 |
| ; RV64-NEXT: # %bb.6: # %entry |
| ; RV64-NEXT: bgeu a4, a0, .LBB51_14 |
| ; RV64-NEXT: .LBB51_7: # %entry |
| ; RV64-NEXT: beq a1, a2, .LBB51_9 |
| ; RV64-NEXT: .LBB51_8: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: .LBB51_9: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB51_10: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a0, a4, .LBB51_2 |
| ; RV64-NEXT: .LBB51_11: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: beqz a1, .LBB51_3 |
| ; RV64-NEXT: .LBB51_12: # %entry |
| ; RV64-NEXT: mv a0, a3 |
| ; RV64-NEXT: bgez a1, .LBB51_4 |
| ; RV64-NEXT: j .LBB51_5 |
| ; RV64-NEXT: .LBB51_13: # %entry |
| ; RV64-NEXT: mv a3, a4 |
| ; RV64-NEXT: bltu a4, a0, .LBB51_7 |
| ; RV64-NEXT: .LBB51_14: # %entry |
| ; RV64-NEXT: mv a0, a4 |
| ; RV64-NEXT: bne a1, a2, .LBB51_8 |
| ; RV64-NEXT: j .LBB51_9 |
| entry: |
| %conv = fptosi half %x to i128 ; widen so the i64 limits are representable exactly |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) ; clamp to INT64_MAX |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) ; clamp to INT64_MIN |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Unsigned saturation of a half to i64: fptoui to i128, clamp with |
| ; llvm.umin, then truncate.  Half goes through __gnu_h2f_ieee before the |
| ; i128 conversion libcall.  Checks are autogenerated — regenerate with |
| ; utils/update_llc_test_checks.py rather than editing them. |
| define i64 @utesth_f16i64_mm(half %x) { |
| ; RV32-LABEL: utesth_f16i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixunssfti@plt |
| ; RV32-NEXT: lw a0, 20(sp) |
| ; RV32-NEXT: lw a3, 16(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB52_3 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: beq a2, a1, .LBB52_4 |
| ; RV32-NEXT: .LBB52_2: |
| ; RV32-NEXT: lw a4, 8(sp) |
| ; RV32-NEXT: j .LBB52_5 |
| ; RV32-NEXT: .LBB52_3: |
| ; RV32-NEXT: seqz a2, a3 |
| ; RV32-NEXT: bne a2, a1, .LBB52_2 |
| ; RV32-NEXT: .LBB52_4: # %entry |
| ; RV32-NEXT: mv a4, a1 |
| ; RV32-NEXT: .LBB52_5: # %entry |
| ; RV32-NEXT: xori a3, a3, 1 |
| ; RV32-NEXT: or a3, a3, a0 |
| ; RV32-NEXT: mv a0, a1 |
| ; RV32-NEXT: beq a3, a1, .LBB52_7 |
| ; RV32-NEXT: # %bb.6: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: .LBB52_7: # %entry |
| ; RV32-NEXT: bne a2, a1, .LBB52_9 |
| ; RV32-NEXT: # %bb.8: # %entry |
| ; RV32-NEXT: mv a2, a1 |
| ; RV32-NEXT: bne a3, a1, .LBB52_10 |
| ; RV32-NEXT: j .LBB52_11 |
| ; RV32-NEXT: .LBB52_9: |
| ; RV32-NEXT: lw a2, 12(sp) |
| ; RV32-NEXT: beq a3, a1, .LBB52_11 |
| ; RV32-NEXT: .LBB52_10: # %entry |
| ; RV32-NEXT: mv a1, a2 |
| ; RV32-NEXT: .LBB52_11: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: utesth_f16i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixunssfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beqz a1, .LBB52_2 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: .LBB52_2: # %entry |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: beq a1, a3, .LBB52_4 |
| ; RV64-NEXT: # %bb.3: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: .LBB52_4: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| entry: |
| %conv = fptoui half %x to i128 ; widen so the clamp constant is representable |
| %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) ; bound is 2^64 (one past u64 max) — NOTE(review): intentional test input; do not "fix" without regenerating checks |
| %conv6 = trunc i128 %spec.store.select to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Unsigned-style saturation of a half to i64: fptosi to i128, clamp with |
| ; llvm.smin/llvm.smax to [0, 2^64], then truncate.  Mirrors |
| ; ustest_f32i64_mm but with the __gnu_h2f_ieee half-to-float step first. |
| ; Checks are autogenerated by utils/update_llc_test_checks.py. |
| define i64 @ustest_f16i64_mm(half %x) { |
| ; RV32-LABEL: ustest_f16i64_mm: |
| ; RV32: # %bb.0: # %entry |
| ; RV32-NEXT: addi sp, sp, -32 |
| ; RV32-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32-NEXT: .cfi_offset ra, -4 |
| ; RV32-NEXT: call __gnu_h2f_ieee@plt |
| ; RV32-NEXT: mv a1, a0 |
| ; RV32-NEXT: addi a0, sp, 8 |
| ; RV32-NEXT: call __fixsfti@plt |
| ; RV32-NEXT: lw a0, 16(sp) |
| ; RV32-NEXT: lw a2, 20(sp) |
| ; RV32-NEXT: li a1, 1 |
| ; RV32-NEXT: mv a4, a0 |
| ; RV32-NEXT: bgez a2, .LBB53_5 |
| ; RV32-NEXT: # %bb.1: # %entry |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bgeu a0, a1, .LBB53_6 |
| ; RV32-NEXT: .LBB53_2: # %entry |
| ; RV32-NEXT: beqz a2, .LBB53_7 |
| ; RV32-NEXT: .LBB53_3: # %entry |
| ; RV32-NEXT: slti a1, a2, 0 |
| ; RV32-NEXT: mv a3, a4 |
| ; RV32-NEXT: beqz a1, .LBB53_8 |
| ; RV32-NEXT: .LBB53_4: |
| ; RV32-NEXT: lw a5, 8(sp) |
| ; RV32-NEXT: j .LBB53_9 |
| ; RV32-NEXT: .LBB53_5: # %entry |
| ; RV32-NEXT: li a4, 1 |
| ; RV32-NEXT: mv a3, a0 |
| ; RV32-NEXT: bltu a0, a1, .LBB53_2 |
| ; RV32-NEXT: .LBB53_6: # %entry |
| ; RV32-NEXT: li a3, 1 |
| ; RV32-NEXT: bnez a2, .LBB53_3 |
| ; RV32-NEXT: .LBB53_7: |
| ; RV32-NEXT: seqz a1, a0 |
| ; RV32-NEXT: bnez a1, .LBB53_4 |
| ; RV32-NEXT: .LBB53_8: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: .LBB53_9: # %entry |
| ; RV32-NEXT: xori a0, a0, 1 |
| ; RV32-NEXT: or a0, a0, a2 |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: beqz a0, .LBB53_11 |
| ; RV32-NEXT: # %bb.10: # %entry |
| ; RV32-NEXT: mv a4, a5 |
| ; RV32-NEXT: .LBB53_11: # %entry |
| ; RV32-NEXT: bnez a1, .LBB53_13 |
| ; RV32-NEXT: # %bb.12: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: bnez a0, .LBB53_14 |
| ; RV32-NEXT: j .LBB53_15 |
| ; RV32-NEXT: .LBB53_13: |
| ; RV32-NEXT: lw a5, 12(sp) |
| ; RV32-NEXT: li a1, 0 |
| ; RV32-NEXT: beqz a0, .LBB53_15 |
| ; RV32-NEXT: .LBB53_14: # %entry |
| ; RV32-NEXT: mv a1, a5 |
| ; RV32-NEXT: .LBB53_15: # %entry |
| ; RV32-NEXT: bgez a2, .LBB53_20 |
| ; RV32-NEXT: # %bb.16: # %entry |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: beqz a1, .LBB53_21 |
| ; RV32-NEXT: .LBB53_17: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: bnez a1, .LBB53_22 |
| ; RV32-NEXT: .LBB53_18: # %entry |
| ; RV32-NEXT: beqz a2, .LBB53_23 |
| ; RV32-NEXT: .LBB53_19: # %entry |
| ; RV32-NEXT: sgtz a5, a2 |
| ; RV32-NEXT: beqz a5, .LBB53_24 |
| ; RV32-NEXT: j .LBB53_25 |
| ; RV32-NEXT: .LBB53_20: # %entry |
| ; RV32-NEXT: li a2, 0 |
| ; RV32-NEXT: mv a5, a4 |
| ; RV32-NEXT: bnez a1, .LBB53_17 |
| ; RV32-NEXT: .LBB53_21: # %entry |
| ; RV32-NEXT: li a5, 0 |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: beqz a1, .LBB53_18 |
| ; RV32-NEXT: .LBB53_22: # %entry |
| ; RV32-NEXT: mv a0, a5 |
| ; RV32-NEXT: bnez a2, .LBB53_19 |
| ; RV32-NEXT: .LBB53_23: |
| ; RV32-NEXT: snez a5, a3 |
| ; RV32-NEXT: bnez a5, .LBB53_25 |
| ; RV32-NEXT: .LBB53_24: # %entry |
| ; RV32-NEXT: li a4, 0 |
| ; RV32-NEXT: .LBB53_25: # %entry |
| ; RV32-NEXT: or a2, a3, a2 |
| ; RV32-NEXT: bnez a2, .LBB53_30 |
| ; RV32-NEXT: # %bb.26: # %entry |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: beqz a5, .LBB53_31 |
| ; RV32-NEXT: .LBB53_27: # %entry |
| ; RV32-NEXT: beqz a2, .LBB53_29 |
| ; RV32-NEXT: .LBB53_28: # %entry |
| ; RV32-NEXT: mv a1, a3 |
| ; RV32-NEXT: .LBB53_29: # %entry |
| ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32-NEXT: addi sp, sp, 32 |
| ; RV32-NEXT: ret |
| ; RV32-NEXT: .LBB53_30: # %entry |
| ; RV32-NEXT: mv a0, a4 |
| ; RV32-NEXT: mv a3, a1 |
| ; RV32-NEXT: bnez a5, .LBB53_27 |
| ; RV32-NEXT: .LBB53_31: # %entry |
| ; RV32-NEXT: li a3, 0 |
| ; RV32-NEXT: bnez a2, .LBB53_28 |
| ; RV32-NEXT: j .LBB53_29 |
| ; |
| ; RV64-LABEL: ustest_f16i64_mm: |
| ; RV64: # %bb.0: # %entry |
| ; RV64-NEXT: addi sp, sp, -16 |
| ; RV64-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill |
| ; RV64-NEXT: .cfi_offset ra, -8 |
| ; RV64-NEXT: call __gnu_h2f_ieee@plt |
| ; RV64-NEXT: call __fixsfti@plt |
| ; RV64-NEXT: mv a2, a0 |
| ; RV64-NEXT: li a4, 1 |
| ; RV64-NEXT: mv a3, a1 |
| ; RV64-NEXT: bgtz a1, .LBB53_6 |
| ; RV64-NEXT: # %bb.1: # %entry |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: bne a1, a4, .LBB53_7 |
| ; RV64-NEXT: .LBB53_2: # %entry |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: blez a3, .LBB53_8 |
| ; RV64-NEXT: .LBB53_3: # %entry |
| ; RV64-NEXT: beqz a3, .LBB53_5 |
| ; RV64-NEXT: .LBB53_4: # %entry |
| ; RV64-NEXT: mv a0, a1 |
| ; RV64-NEXT: .LBB53_5: # %entry |
| ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload |
| ; RV64-NEXT: addi sp, sp, 16 |
| ; RV64-NEXT: ret |
| ; RV64-NEXT: .LBB53_6: # %entry |
| ; RV64-NEXT: li a2, 0 |
| ; RV64-NEXT: li a3, 1 |
| ; RV64-NEXT: li a0, 0 |
| ; RV64-NEXT: beq a1, a4, .LBB53_2 |
| ; RV64-NEXT: .LBB53_7: # %entry |
| ; RV64-NEXT: mv a0, a2 |
| ; RV64-NEXT: mv a1, a0 |
| ; RV64-NEXT: bgtz a3, .LBB53_3 |
| ; RV64-NEXT: .LBB53_8: # %entry |
| ; RV64-NEXT: li a1, 0 |
| ; RV64-NEXT: bnez a3, .LBB53_4 |
| ; RV64-NEXT: j .LBB53_5 |
| entry: |
| %conv = fptosi half %x to i128 ; widen so both clamps are exact |
| %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) ; upper bound is 2^64 (one past u64 max) — NOTE(review): intentional test input; do not "fix" without regenerating checks |
| %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) ; lower clamp at zero |
| %conv6 = trunc i128 %spec.store.select7 to i64 |
| ret i64 %conv6 |
| } |
| |
| ; Saturation min/max intrinsics referenced by the clamp patterns above. |
| declare i32 @llvm.smin.i32(i32, i32) |
| declare i32 @llvm.smax.i32(i32, i32) |
| declare i32 @llvm.umin.i32(i32, i32) |
| declare i64 @llvm.smin.i64(i64, i64) |
| declare i64 @llvm.smax.i64(i64, i64) |
| declare i64 @llvm.umin.i64(i64, i64) |
| declare i128 @llvm.smin.i128(i128, i128) |
| declare i128 @llvm.smax.i128(i128, i128) |
| declare i128 @llvm.umin.i128(i128, i128) |