; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefixes=RV32,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefixes=RV64,RV64IF %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=RV32,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=RV64,RV64IFD %s
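
; The four RUN configurations cover RV32 and RV64 with single-precision
; hardware only (RV32IF/RV64IF, where double conversions go through libcalls)
; and with both F and D (RV32IFD/RV64IFD, where double conversions use fcvt).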

; i32 saturate
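;
; Each width is tested in three variants: stest_* clamps a signed conversion
; to the signed range of the narrower type, utest_* clamps an unsigned
; conversion to the unsigned range, and ustest_* clamps a signed conversion
; to [0, unsigned max]. The clamps are written as icmp+select on a wider
; intermediate type and should be matched as saturating fp-to-int conversions
; where the target can do better than the generic compare-and-branch lowering.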

define i32 @stest_f64i32(double %x) {
; RV32IF-LABEL: stest_f64i32:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixdfdi
; RV32IF-NEXT:    lui a2, 524288
; RV32IF-NEXT:    addi a3, a2, -1
; RV32IF-NEXT:    beqz a1, .LBB0_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    slti a4, a1, 0
; RV32IF-NEXT:    j .LBB0_3
; RV32IF-NEXT:  .LBB0_2:
; RV32IF-NEXT:    sltu a4, a0, a3
; RV32IF-NEXT:  .LBB0_3: # %entry
; RV32IF-NEXT:    neg a5, a4
; RV32IF-NEXT:    and a1, a5, a1
; RV32IF-NEXT:    bnez a4, .LBB0_5
; RV32IF-NEXT:  # %bb.4: # %entry
; RV32IF-NEXT:    mv a0, a3
; RV32IF-NEXT:  .LBB0_5: # %entry
; RV32IF-NEXT:    li a3, -1
; RV32IF-NEXT:    beq a1, a3, .LBB0_7
; RV32IF-NEXT:  # %bb.6: # %entry
; RV32IF-NEXT:    slti a1, a1, 0
; RV32IF-NEXT:    xori a1, a1, 1
; RV32IF-NEXT:    beqz a1, .LBB0_8
; RV32IF-NEXT:    j .LBB0_9
; RV32IF-NEXT:  .LBB0_7:
; RV32IF-NEXT:    sltu a1, a2, a0
; RV32IF-NEXT:    bnez a1, .LBB0_9
; RV32IF-NEXT:  .LBB0_8: # %entry
; RV32IF-NEXT:    lui a0, 524288
; RV32IF-NEXT:  .LBB0_9: # %entry
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: stest_f64i32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixdfdi
; RV64IF-NEXT:    lui a1, 524288
; RV64IF-NEXT:    addiw a2, a1, -1
; RV64IF-NEXT:    blt a0, a2, .LBB0_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a2
; RV64IF-NEXT:  .LBB0_2: # %entry
; RV64IF-NEXT:    blt a1, a0, .LBB0_4
; RV64IF-NEXT:  # %bb.3: # %entry
; RV64IF-NEXT:    lui a0, 524288
; RV64IF-NEXT:  .LBB0_4: # %entry
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: stest_f64i32:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT:    feq.d a1, fa0, fa0
; RV32IFD-NEXT:    seqz a1, a1
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: stest_f64i32:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    lui a1, 524288
; RV64IFD-NEXT:    addiw a2, a1, -1
; RV64IFD-NEXT:    bge a0, a2, .LBB0_3
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    bge a1, a0, .LBB0_4
; RV64IFD-NEXT:  .LBB0_2: # %entry
; RV64IFD-NEXT:    ret
; RV64IFD-NEXT:  .LBB0_3: # %entry
; RV64IFD-NEXT:    mv a0, a2
; RV64IFD-NEXT:    blt a1, a2, .LBB0_2
; RV64IFD-NEXT:  .LBB0_4: # %entry
; RV64IFD-NEXT:    lui a0, 524288
; RV64IFD-NEXT:    ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f64i32(double %x) {
; RV32IF-LABEL: utest_f64i32:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunsdfdi
; RV32IF-NEXT:    sltiu a2, a0, -1
; RV32IF-NEXT:    seqz a1, a1
; RV32IF-NEXT:    and a1, a1, a2
; RV32IF-NEXT:    addi a1, a1, -1
; RV32IF-NEXT:    or a0, a1, a0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: utest_f64i32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixunsdfdi
; RV64IF-NEXT:    li a1, -1
; RV64IF-NEXT:    srli a1, a1, 32
; RV64IF-NEXT:    bltu a0, a1, .LBB1_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB1_2: # %entry
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: utest_f64i32:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT:    feq.d a1, fa0, fa0
; RV32IFD-NEXT:    seqz a1, a1
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: utest_f64i32:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT:    li a1, -1
; RV64IFD-NEXT:    srli a1, a1, 32
; RV64IFD-NEXT:    bltu a0, a1, .LBB1_2
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    mv a0, a1
; RV64IFD-NEXT:  .LBB1_2: # %entry
; RV64IFD-NEXT:    ret
entry:
  %conv = fptoui double %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f64i32(double %x) {
; RV32IF-LABEL: ustest_f64i32:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixdfdi
; RV32IF-NEXT:    beqz a1, .LBB2_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    slti a2, a1, 0
; RV32IF-NEXT:    j .LBB2_3
; RV32IF-NEXT:  .LBB2_2:
; RV32IF-NEXT:    sltiu a2, a0, -1
; RV32IF-NEXT:  .LBB2_3: # %entry
; RV32IF-NEXT:    addi a3, a2, -1
; RV32IF-NEXT:    neg a2, a2
; RV32IF-NEXT:    and a1, a2, a1
; RV32IF-NEXT:    or a0, a3, a0
; RV32IF-NEXT:    beqz a1, .LBB2_5
; RV32IF-NEXT:  # %bb.4: # %entry
; RV32IF-NEXT:    sgtz a1, a1
; RV32IF-NEXT:    j .LBB2_6
; RV32IF-NEXT:  .LBB2_5:
; RV32IF-NEXT:    snez a1, a0
; RV32IF-NEXT:  .LBB2_6: # %entry
; RV32IF-NEXT:    neg a1, a1
; RV32IF-NEXT:    and a0, a1, a0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: ustest_f64i32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixdfdi
; RV64IF-NEXT:    li a1, -1
; RV64IF-NEXT:    srli a1, a1, 32
; RV64IF-NEXT:    blt a0, a1, .LBB2_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB2_2: # %entry
; RV64IF-NEXT:    sgtz a1, a0
; RV64IF-NEXT:    neg a1, a1
; RV64IF-NEXT:    and a0, a1, a0
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: ustest_f64i32:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT:    feq.d a1, fa0, fa0
; RV32IFD-NEXT:    seqz a1, a1
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ustest_f64i32:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    li a1, -1
; RV64IFD-NEXT:    srli a1, a1, 32
; RV64IFD-NEXT:    blt a0, a1, .LBB2_2
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    mv a0, a1
; RV64IFD-NEXT:  .LBB2_2: # %entry
; RV64IFD-NEXT:    sgtz a1, a0
; RV64IFD-NEXT:    neg a1, a1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

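; With hardware float, the i32 clamps on RV32 fold to a single fcvt plus a
; feq-based mask that zeroes the result for NaN; RV64 converts through i64
; and keeps the explicit compare-and-branch clamp.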
define i32 @stest_f32i32(float %x) {
; RV32-LABEL: stest_f32i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-NEXT:    feq.s a1, fa0, fa0
; RV32-NEXT:    seqz a1, a1
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: stest_f32i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 524288
; RV64-NEXT:    addiw a2, a1, -1
; RV64-NEXT:    bge a0, a2, .LBB3_3
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    bge a1, a0, .LBB3_4
; RV64-NEXT:  .LBB3_2: # %entry
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB3_3: # %entry
; RV64-NEXT:    mv a0, a2
; RV64-NEXT:    blt a1, a2, .LBB3_2
; RV64-NEXT:  .LBB3_4: # %entry
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f32i32(float %x) {
; RV32-LABEL: utest_f32i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32-NEXT:    feq.s a1, fa0, fa0
; RV32-NEXT:    seqz a1, a1
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: utest_f32i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    bltu a0, a1, .LBB4_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB4_2: # %entry
; RV64-NEXT:    ret
entry:
  %conv = fptoui float %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f32i32(float %x) {
; RV32-LABEL: ustest_f32i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32-NEXT:    feq.s a1, fa0, fa0
; RV32-NEXT:    seqz a1, a1
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ustest_f32i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    blt a0, a1, .LBB5_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB5_2: # %entry
; RV64-NEXT:    sgtz a1, a0
; RV64-NEXT:    neg a1, a1
; RV64-NEXT:    and a0, a1, a0
; RV64-NEXT:    ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

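; The half tests promote to float via __extendhfsf2 (only F/D are enabled,
; no Zfh), then follow the same clamping patterns as the float tests.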
define i32 @stest_f16i32(half %x) {
; RV32-LABEL: stest_f16i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    call __fixsfdi
; RV32-NEXT:    lui a2, 524288
; RV32-NEXT:    addi a3, a2, -1
; RV32-NEXT:    beqz a1, .LBB6_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    slti a4, a1, 0
; RV32-NEXT:    j .LBB6_3
; RV32-NEXT:  .LBB6_2:
; RV32-NEXT:    sltu a4, a0, a3
; RV32-NEXT:  .LBB6_3: # %entry
; RV32-NEXT:    neg a5, a4
; RV32-NEXT:    and a1, a5, a1
; RV32-NEXT:    bnez a4, .LBB6_5
; RV32-NEXT:  # %bb.4: # %entry
; RV32-NEXT:    mv a0, a3
; RV32-NEXT:  .LBB6_5: # %entry
; RV32-NEXT:    li a3, -1
; RV32-NEXT:    beq a1, a3, .LBB6_7
; RV32-NEXT:  # %bb.6: # %entry
; RV32-NEXT:    slti a1, a1, 0
; RV32-NEXT:    xori a1, a1, 1
; RV32-NEXT:    beqz a1, .LBB6_8
; RV32-NEXT:    j .LBB6_9
; RV32-NEXT:  .LBB6_7:
; RV32-NEXT:    sltu a1, a2, a0
; RV32-NEXT:    bnez a1, .LBB6_9
; RV32-NEXT:  .LBB6_8: # %entry
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:  .LBB6_9: # %entry
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: stest_f16i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 524288
; RV64-NEXT:    addiw a2, a1, -1
; RV64-NEXT:    blt a0, a2, .LBB6_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a2
; RV64-NEXT:  .LBB6_2: # %entry
; RV64-NEXT:    blt a1, a0, .LBB6_4
; RV64-NEXT:  # %bb.3: # %entry
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:  .LBB6_4: # %entry
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utesth_f16i32(half %x) {
; RV32-LABEL: utesth_f16i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    call __fixunssfdi
; RV32-NEXT:    sltiu a2, a0, -1
; RV32-NEXT:    seqz a1, a1
; RV32-NEXT:    and a1, a1, a2
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    or a0, a1, a0
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: utesth_f16i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    bltu a0, a1, .LBB7_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB7_2: # %entry
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptoui half %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f16i32(half %x) {
; RV32-LABEL: ustest_f16i32:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    call __fixsfdi
; RV32-NEXT:    beqz a1, .LBB8_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    slti a2, a1, 0
; RV32-NEXT:    j .LBB8_3
; RV32-NEXT:  .LBB8_2:
; RV32-NEXT:    sltiu a2, a0, -1
; RV32-NEXT:  .LBB8_3: # %entry
; RV32-NEXT:    addi a3, a2, -1
; RV32-NEXT:    neg a2, a2
; RV32-NEXT:    and a1, a2, a1
; RV32-NEXT:    or a0, a3, a0
; RV32-NEXT:    beqz a1, .LBB8_5
; RV32-NEXT:  # %bb.4: # %entry
; RV32-NEXT:    sgtz a1, a1
; RV32-NEXT:    j .LBB8_6
; RV32-NEXT:  .LBB8_5:
; RV32-NEXT:    snez a1, a0
; RV32-NEXT:  .LBB8_6: # %entry
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ustest_f16i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    blt a0, a1, .LBB8_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB8_2: # %entry
; RV64-NEXT:    sgtz a1, a0
; RV64-NEXT:    neg a1, a1
; RV64-NEXT:    and a0, a1, a0
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

; i16 saturate
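;
; The i16 clamps go through an i32 intermediate, so the only conversion
; libcalls needed are the double ones on the F-only configurations; the
; bounds are materialized with lui/addi(w) (32767 and 65535 from lui 8/16,
; -32768 from lui 1048568).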

define i16 @stest_f64i16(double %x) {
; RV32IF-LABEL: stest_f64i16:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixdfsi
; RV32IF-NEXT:    lui a1, 8
; RV32IF-NEXT:    addi a1, a1, -1
; RV32IF-NEXT:    blt a0, a1, .LBB9_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB9_2: # %entry
; RV32IF-NEXT:    lui a1, 1048568
; RV32IF-NEXT:    blt a1, a0, .LBB9_4
; RV32IF-NEXT:  # %bb.3: # %entry
; RV32IF-NEXT:    lui a0, 1048568
; RV32IF-NEXT:  .LBB9_4: # %entry
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: stest_f64i16:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixdfsi
; RV64IF-NEXT:    lui a1, 8
; RV64IF-NEXT:    addiw a1, a1, -1
; RV64IF-NEXT:    blt a0, a1, .LBB9_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB9_2: # %entry
; RV64IF-NEXT:    lui a1, 1048568
; RV64IF-NEXT:    blt a1, a0, .LBB9_4
; RV64IF-NEXT:  # %bb.3: # %entry
; RV64IF-NEXT:    lui a0, 1048568
; RV64IF-NEXT:  .LBB9_4: # %entry
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: stest_f64i16:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT:    lui a1, 8
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    bge a0, a1, .LBB9_3
; RV32IFD-NEXT:  # %bb.1: # %entry
; RV32IFD-NEXT:    lui a1, 1048568
; RV32IFD-NEXT:    bge a1, a0, .LBB9_4
; RV32IFD-NEXT:  .LBB9_2: # %entry
; RV32IFD-NEXT:    ret
; RV32IFD-NEXT:  .LBB9_3: # %entry
; RV32IFD-NEXT:    mv a0, a1
; RV32IFD-NEXT:    lui a1, 1048568
; RV32IFD-NEXT:    blt a1, a0, .LBB9_2
; RV32IFD-NEXT:  .LBB9_4: # %entry
; RV32IFD-NEXT:    lui a0, 1048568
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: stest_f64i16:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT:    lui a1, 8
; RV64IFD-NEXT:    addiw a1, a1, -1
; RV64IFD-NEXT:    bge a0, a1, .LBB9_3
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    lui a1, 1048568
; RV64IFD-NEXT:    bge a1, a0, .LBB9_4
; RV64IFD-NEXT:  .LBB9_2: # %entry
; RV64IFD-NEXT:    ret
; RV64IFD-NEXT:  .LBB9_3: # %entry
; RV64IFD-NEXT:    mv a0, a1
; RV64IFD-NEXT:    lui a1, 1048568
; RV64IFD-NEXT:    blt a1, a0, .LBB9_2
; RV64IFD-NEXT:  .LBB9_4: # %entry
; RV64IFD-NEXT:    lui a0, 1048568
; RV64IFD-NEXT:    ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f64i16(double %x) {
; RV32IF-LABEL: utest_f64i16:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunsdfsi
; RV32IF-NEXT:    lui a1, 16
; RV32IF-NEXT:    addi a1, a1, -1
; RV32IF-NEXT:    bltu a0, a1, .LBB10_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB10_2: # %entry
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: utest_f64i16:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixunsdfsi
; RV64IF-NEXT:    lui a1, 16
; RV64IF-NEXT:    addiw a1, a1, -1
; RV64IF-NEXT:    bltu a0, a1, .LBB10_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB10_2: # %entry
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: utest_f64i16:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT:    lui a1, 16
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    bltu a0, a1, .LBB10_2
; RV32IFD-NEXT:  # %bb.1: # %entry
; RV32IFD-NEXT:    mv a0, a1
; RV32IFD-NEXT:  .LBB10_2: # %entry
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: utest_f64i16:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV64IFD-NEXT:    lui a1, 16
; RV64IFD-NEXT:    addiw a1, a1, -1
; RV64IFD-NEXT:    bltu a0, a1, .LBB10_2
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    mv a0, a1
; RV64IFD-NEXT:  .LBB10_2: # %entry
; RV64IFD-NEXT:    ret
entry:
  %conv = fptoui double %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f64i16(double %x) {
; RV32IF-LABEL: ustest_f64i16:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixdfsi
; RV32IF-NEXT:    lui a1, 16
; RV32IF-NEXT:    addi a1, a1, -1
; RV32IF-NEXT:    blt a0, a1, .LBB11_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB11_2: # %entry
; RV32IF-NEXT:    sgtz a1, a0
; RV32IF-NEXT:    neg a1, a1
; RV32IF-NEXT:    and a0, a1, a0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: ustest_f64i16:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixdfsi
; RV64IF-NEXT:    lui a1, 16
; RV64IF-NEXT:    addiw a1, a1, -1
; RV64IF-NEXT:    blt a0, a1, .LBB11_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB11_2: # %entry
; RV64IF-NEXT:    sgtz a1, a0
; RV64IF-NEXT:    neg a1, a1
; RV64IF-NEXT:    and a0, a1, a0
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: ustest_f64i16:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT:    lui a1, 16
; RV32IFD-NEXT:    addi a1, a1, -1
; RV32IFD-NEXT:    blt a0, a1, .LBB11_2
; RV32IFD-NEXT:  # %bb.1: # %entry
; RV32IFD-NEXT:    mv a0, a1
; RV32IFD-NEXT:  .LBB11_2: # %entry
; RV32IFD-NEXT:    sgtz a1, a0
; RV32IFD-NEXT:    neg a1, a1
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ustest_f64i16:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT:    lui a1, 16
; RV64IFD-NEXT:    addiw a1, a1, -1
; RV64IFD-NEXT:    blt a0, a1, .LBB11_2
; RV64IFD-NEXT:  # %bb.1: # %entry
; RV64IFD-NEXT:    mv a0, a1
; RV64IFD-NEXT:  .LBB11_2: # %entry
; RV64IFD-NEXT:    sgtz a1, a0
; RV64IFD-NEXT:    neg a1, a1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f32i16(float %x) {
; RV32-LABEL: stest_f32i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 8
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    bge a0, a1, .LBB12_3
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    lui a1, 1048568
; RV32-NEXT:    bge a1, a0, .LBB12_4
; RV32-NEXT:  .LBB12_2: # %entry
; RV32-NEXT:    ret
; RV32-NEXT:  .LBB12_3: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:    lui a1, 1048568
; RV32-NEXT:    blt a1, a0, .LBB12_2
; RV32-NEXT:  .LBB12_4: # %entry
; RV32-NEXT:    lui a0, 1048568
; RV32-NEXT:    ret
;
; RV64-LABEL: stest_f32i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 8
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    bge a0, a1, .LBB12_3
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    lui a1, 1048568
; RV64-NEXT:    bge a1, a0, .LBB12_4
; RV64-NEXT:  .LBB12_2: # %entry
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB12_3: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    lui a1, 1048568
; RV64-NEXT:    blt a1, a0, .LBB12_2
; RV64-NEXT:  .LBB12_4: # %entry
; RV64-NEXT:    lui a0, 1048568
; RV64-NEXT:    ret
entry:
  %conv = fptosi float %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f32i16(float %x) {
; RV32-LABEL: utest_f32i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    bltu a0, a1, .LBB13_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:  .LBB13_2: # %entry
; RV32-NEXT:    ret
;
; RV64-LABEL: utest_f32i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    bltu a0, a1, .LBB13_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB13_2: # %entry
; RV64-NEXT:    ret
entry:
  %conv = fptoui float %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f32i16(float %x) {
; RV32-LABEL: ustest_f32i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    blt a0, a1, .LBB14_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:  .LBB14_2: # %entry
; RV32-NEXT:    sgtz a1, a0
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ustest_f32i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    blt a0, a1, .LBB14_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB14_2: # %entry
; RV64-NEXT:    sgtz a1, a0
; RV64-NEXT:    neg a1, a1
; RV64-NEXT:    and a0, a1, a0
; RV64-NEXT:    ret
entry:
  %conv = fptosi float %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f16i16(half %x) {
; RV32-LABEL: stest_f16i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 8
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    blt a0, a1, .LBB15_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:  .LBB15_2: # %entry
; RV32-NEXT:    lui a1, 1048568
; RV32-NEXT:    blt a1, a0, .LBB15_4
; RV32-NEXT:  # %bb.3: # %entry
; RV32-NEXT:    lui a0, 1048568
; RV32-NEXT:  .LBB15_4: # %entry
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: stest_f16i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 8
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    blt a0, a1, .LBB15_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB15_2: # %entry
; RV64-NEXT:    lui a1, 1048568
; RV64-NEXT:    blt a1, a0, .LBB15_4
; RV64-NEXT:  # %bb.3: # %entry
; RV64-NEXT:    lui a0, 1048568
; RV64-NEXT:  .LBB15_4: # %entry
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptosi half %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utesth_f16i16(half %x) {
; RV32-LABEL: utesth_f16i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    bltu a0, a1, .LBB16_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:  .LBB16_2: # %entry
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: utesth_f16i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    bltu a0, a1, .LBB16_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB16_2: # %entry
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptoui half %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f16i16(half %x) {
; RV32-LABEL: ustest_f16i16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call __extendhfsf2
; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    blt a0, a1, .LBB17_2
; RV32-NEXT:  # %bb.1: # %entry
; RV32-NEXT:    mv a0, a1
; RV32-NEXT:  .LBB17_2: # %entry
; RV32-NEXT:    sgtz a1, a0
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a0, a1, a0
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ustest_f16i16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __extendhfsf2
; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    blt a0, a1, .LBB17_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB17_2: # %entry
; RV64-NEXT:    sgtz a1, a0
; RV64-NEXT:    neg a1, a1
; RV64-NEXT:    and a0, a1, a0
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
entry:
  %conv = fptosi half %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

; i64 saturate
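;
; The i64 clamps go through an i128 intermediate, which generally needs the
; __fixdfti/__fixunsdfti (or __fixsfti) libcalls; on RV32 the i128 result is
; returned indirectly through a stack slot. The exception is RV64IFD
; stest_f64i64, where the signed clamp folds to fcvt.l.d with a NaN mask.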

define i64 @stest_f64i64(double %x) {
; RV32IF-LABEL: stest_f64i64:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -32
; RV32IF-NEXT:    .cfi_def_cfa_offset 32
; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    mv a2, a1
; RV32IF-NEXT:    mv a1, a0
; RV32IF-NEXT:    addi a0, sp, 8
; RV32IF-NEXT:    call __fixdfti
; RV32IF-NEXT:    lw a3, 8(sp)
; RV32IF-NEXT:    lw a1, 12(sp)
; RV32IF-NEXT:    lw a2, 16(sp)
; RV32IF-NEXT:    lw a4, 20(sp)
; RV32IF-NEXT:    lui a0, 524288
; RV32IF-NEXT:    addi a5, a0, -1
; RV32IF-NEXT:    beq a1, a5, .LBB18_2
; RV32IF-NEXT:  # %bb.1: # %entry
; RV32IF-NEXT:    sltu a6, a1, a5
; RV32IF-NEXT:    or a7, a2, a4
; RV32IF-NEXT:    bnez a7, .LBB18_3
; RV32IF-NEXT:    j .LBB18_4
; RV32IF-NEXT:  .LBB18_2:
; RV32IF-NEXT:    sltiu a6, a3, -1
; RV32IF-NEXT:    or a7, a2, a4
; RV32IF-NEXT:    beqz a7, .LBB18_4
; RV32IF-NEXT:  .LBB18_3: # %entry
; RV32IF-NEXT:    slti a6, a4, 0
; RV32IF-NEXT:  .LBB18_4: # %entry
; RV32IF-NEXT:    addi a7, a6, -1
; RV32IF-NEXT:    neg t0, a6
; RV32IF-NEXT:    bnez a6, .LBB18_6
; RV32IF-NEXT:  # %bb.5: # %entry
; RV32IF-NEXT:    mv a1, a5
; RV32IF-NEXT:  .LBB18_6: # %entry
; RV32IF-NEXT:    or a3, a7, a3
; RV32IF-NEXT:    and a4, t0, a4
; RV32IF-NEXT:    and a2, t0, a2
; RV32IF-NEXT:    beq a1, a0, .LBB18_8
; RV32IF-NEXT:  # %bb.7: # %entry
; RV32IF-NEXT:    sltu a0, a0, a1
; RV32IF-NEXT:    j .LBB18_9
; RV32IF-NEXT:  .LBB18_8:
; RV32IF-NEXT:    snez a0, a3
; RV32IF-NEXT:  .LBB18_9: # %entry
; RV32IF-NEXT:    and a2, a2, a4
; RV32IF-NEXT:    li a5, -1
; RV32IF-NEXT:    beq a2, a5, .LBB18_11
; RV32IF-NEXT:  # %bb.10: # %entry
; RV32IF-NEXT:    slti a0, a4, 0
; RV32IF-NEXT:    xori a0, a0, 1
; RV32IF-NEXT:  .LBB18_11: # %entry
; RV32IF-NEXT:    bnez a0, .LBB18_13
; RV32IF-NEXT:  # %bb.12: # %entry
; RV32IF-NEXT:    lui a1, 524288
; RV32IF-NEXT:  .LBB18_13: # %entry
; RV32IF-NEXT:    neg a0, a0
; RV32IF-NEXT:    and a0, a0, a3
; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 32
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: stest_f64i64:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call __fixdfti
; RV64IF-NEXT:    li a2, -1
; RV64IF-NEXT:    srli a3, a2, 1
; RV64IF-NEXT:    beqz a1, .LBB18_2
; RV64IF-NEXT:  # %bb.1: # %entry
; RV64IF-NEXT:    slti a4, a1, 0
; RV64IF-NEXT:    j .LBB18_3
; RV64IF-NEXT:  .LBB18_2:
; RV64IF-NEXT:    sltu a4, a0, a3
; RV64IF-NEXT:  .LBB18_3: # %entry
; RV64IF-NEXT:    neg a5, a4
; RV64IF-NEXT:    and a5, a5, a1
; RV64IF-NEXT:    bnez a4, .LBB18_5
; RV64IF-NEXT:  # %bb.4: # %entry
; RV64IF-NEXT:    mv a0, a3
; RV64IF-NEXT:  .LBB18_5: # %entry
; RV64IF-NEXT:    slli a1, a2, 63
; RV64IF-NEXT:    beq a5, a2, .LBB18_7
; RV64IF-NEXT:  # %bb.6: # %entry
; RV64IF-NEXT:    slti a2, a5, 0
; RV64IF-NEXT:    xori a2, a2, 1
; RV64IF-NEXT:    beqz a2, .LBB18_8
; RV64IF-NEXT:    j .LBB18_9
; RV64IF-NEXT:  .LBB18_7:
; RV64IF-NEXT:    sltu a2, a1, a0
; RV64IF-NEXT:    bnez a2, .LBB18_9
; RV64IF-NEXT:  .LBB18_8: # %entry
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB18_9: # %entry
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    .cfi_restore ra
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    .cfi_def_cfa_offset 0
; RV64IF-NEXT:    ret
;
; RV32IFD-LABEL: stest_f64i64:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    addi a0, sp, 8
; RV32IFD-NEXT:    call __fixdfti
; RV32IFD-NEXT:    lw a3, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    lw a2, 16(sp)
; RV32IFD-NEXT:    lw a4, 20(sp)
; RV32IFD-NEXT:    lui a0, 524288
; RV32IFD-NEXT:    addi a5, a0, -1
; RV32IFD-NEXT:    beq a1, a5, .LBB18_2
; RV32IFD-NEXT:  # %bb.1: # %entry
; RV32IFD-NEXT:    sltu a6, a1, a5
; RV32IFD-NEXT:    or a7, a2, a4
; RV32IFD-NEXT:    bnez a7, .LBB18_3
; RV32IFD-NEXT:    j .LBB18_4
; RV32IFD-NEXT:  .LBB18_2:
; RV32IFD-NEXT:    sltiu a6, a3, -1
; RV32IFD-NEXT:    or a7, a2, a4
; RV32IFD-NEXT:    beqz a7, .LBB18_4
; RV32IFD-NEXT:  .LBB18_3: # %entry
; RV32IFD-NEXT:    slti a6, a4, 0
; RV32IFD-NEXT:  .LBB18_4: # %entry
; RV32IFD-NEXT:    addi a7, a6, -1
; RV32IFD-NEXT:    neg t0, a6
; RV32IFD-NEXT:    bnez a6, .LBB18_6
; RV32IFD-NEXT:  # %bb.5: # %entry
; RV32IFD-NEXT:    mv a1, a5
; RV32IFD-NEXT:  .LBB18_6: # %entry
; RV32IFD-NEXT:    or a3, a7, a3
; RV32IFD-NEXT:    and a4, t0, a4
; RV32IFD-NEXT:    and a2, t0, a2
; RV32IFD-NEXT:    beq a1, a0, .LBB18_8
; RV32IFD-NEXT:  # %bb.7: # %entry
; RV32IFD-NEXT:    sltu a0, a0, a1
; RV32IFD-NEXT:    j .LBB18_9
; RV32IFD-NEXT:  .LBB18_8:
; RV32IFD-NEXT:    snez a0, a3
; RV32IFD-NEXT:  .LBB18_9: # %entry
; RV32IFD-NEXT:    and a2, a2, a4
; RV32IFD-NEXT:    li a5, -1
; RV32IFD-NEXT:    beq a2, a5, .LBB18_11
; RV32IFD-NEXT:  # %bb.10: # %entry
; RV32IFD-NEXT:    slti a0, a4, 0
; RV32IFD-NEXT:    xori a0, a0, 1
; RV32IFD-NEXT:  .LBB18_11: # %entry
; RV32IFD-NEXT:    bnez a0, .LBB18_13
; RV32IFD-NEXT:  # %bb.12: # %entry
; RV32IFD-NEXT:    lui a1, 524288
; RV32IFD-NEXT:  .LBB18_13: # %entry
; RV32IFD-NEXT:    neg a0, a0
; RV32IFD-NEXT:    and a0, a0, a3
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: stest_f64i64:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
entry:
  %conv = fptosi double %x to i128
  %0 = icmp slt i128 %conv, 9223372036854775807
  %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807
  %1 = icmp sgt i128 %spec.store.select, -9223372036854775808
  %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

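; The clamp bound below is 18446744073709551616 (2^64, one above the unsigned
; maximum), so the saturated value truncates to 0 rather than to all-ones.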
define i64 @utest_f64i64(double %x) {
; RV32IF-LABEL: utest_f64i64:
; RV32IF:       # %bb.0: # %entry
; RV32IF-NEXT:    addi sp, sp, -32
; RV32IF-NEXT:    .cfi_def_cfa_offset 32
; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    mv a2, a1
; RV32IF-NEXT:    mv a1, a0
; RV32IF-NEXT:    addi a0, sp, 8
; RV32IF-NEXT:    call __fixunsdfti
; RV32IF-NEXT:    lw a0, 16(sp)
; RV32IF-NEXT:    lw a1, 20(sp)
; RV32IF-NEXT:    lw a2, 12(sp)
; RV32IF-NEXT:    lw a3, 8(sp)
; RV32IF-NEXT:    or a4, a1, a0
; RV32IF-NEXT:    xori a0, a0, 1
; RV32IF-NEXT:    seqz a4, a4
; RV32IF-NEXT:    or a0, a0, a1
; RV32IF-NEXT:    seqz a0, a0
; RV32IF-NEXT:    addi a0, a0, -1
; RV32IF-NEXT:    and a0, a0, a4
; RV32IF-NEXT:    neg a1, a0
; RV32IF-NEXT:    and a0, a1, a3
; RV32IF-NEXT:    and a1, a1, a2
; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 32
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64-LABEL: utest_f64i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call __fixunsdfti
; RV64-NEXT:    snez a1, a1
; RV64-NEXT:    addi a1, a1, -1
; RV64-NEXT:    and a0, a1, a0
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
;
; RV32IFD-LABEL: utest_f64i64:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    addi a0, sp, 8
; RV32IFD-NEXT:    call __fixunsdfti
; RV32IFD-NEXT:    lw a0, 16(sp)
; RV32IFD-NEXT:    lw a1, 20(sp)
; RV32IFD-NEXT:    lw a2, 12(sp)
; RV32IFD-NEXT:    lw a3, 8(sp)
; RV32IFD-NEXT:    or a4, a1, a0
; RV32IFD-NEXT:    xori a0, a0, 1
; RV32IFD-NEXT:    seqz a4, a4
; RV32IFD-NEXT:    or a0, a0, a1
; RV32IFD-NEXT:    seqz a0, a0
; RV32IFD-NEXT:    addi a0, a0, -1
; RV32IFD-NEXT:    and a0, a0, a4
; RV32IFD-NEXT:    neg a1, a0
; RV32IFD-NEXT:    and a0, a1, a3
; RV32IFD-NEXT:    and a1, a1, a2
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
entry:
  %conv = fptoui double %x to i128
  %0 = icmp ult i128 %conv, 18446744073709551616
  %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}

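; As above, the upper bound is 2^64 rather than 2^64-1, so both the negative
; case and the overflow case produce values that truncate to 0.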
 | define i64 @ustest_f64i64(double %x) { | 
 | ; RV32IF-LABEL: ustest_f64i64: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    mv a2, a1 | 
 | ; RV32IF-NEXT:    mv a1, a0 | 
 | ; RV32IF-NEXT:    addi a0, sp, 8 | 
 | ; RV32IF-NEXT:    call __fixdfti | 
 | ; RV32IF-NEXT:    lw a1, 20(sp) | 
 | ; RV32IF-NEXT:    lw a0, 16(sp) | 
 | ; RV32IF-NEXT:    beqz a1, .LBB20_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    slti a2, a1, 0 | 
 | ; RV32IF-NEXT:    j .LBB20_3 | 
 | ; RV32IF-NEXT:  .LBB20_2: | 
 | ; RV32IF-NEXT:    seqz a2, a0 | 
 | ; RV32IF-NEXT:  .LBB20_3: # %entry | 
 | ; RV32IF-NEXT:    xori a3, a0, 1 | 
 | ; RV32IF-NEXT:    or a3, a3, a1 | 
 | ; RV32IF-NEXT:    seqz a3, a3 | 
 | ; RV32IF-NEXT:    addi a3, a3, -1 | 
 | ; RV32IF-NEXT:    and a3, a3, a2 | 
 | ; RV32IF-NEXT:    neg a2, a3 | 
 | ; RV32IF-NEXT:    bnez a3, .LBB20_5 | 
 | ; RV32IF-NEXT:  # %bb.4: # %entry | 
 | ; RV32IF-NEXT:    li a0, 1 | 
 | ; RV32IF-NEXT:  .LBB20_5: # %entry | 
 | ; RV32IF-NEXT:    lw a3, 8(sp) | 
 | ; RV32IF-NEXT:    lw a4, 12(sp) | 
 | ; RV32IF-NEXT:    and a5, a2, a1 | 
 | ; RV32IF-NEXT:    beqz a5, .LBB20_7 | 
 | ; RV32IF-NEXT:  # %bb.6: # %entry | 
 | ; RV32IF-NEXT:    sgtz a1, a5 | 
 | ; RV32IF-NEXT:    j .LBB20_8 | 
 | ; RV32IF-NEXT:  .LBB20_7: | 
 | ; RV32IF-NEXT:    snez a1, a0 | 
 | ; RV32IF-NEXT:  .LBB20_8: # %entry | 
 | ; RV32IF-NEXT:    and a4, a2, a4 | 
 | ; RV32IF-NEXT:    or a0, a0, a5 | 
 | ; RV32IF-NEXT:    and a2, a2, a3 | 
 | ; RV32IF-NEXT:    bnez a0, .LBB20_10 | 
 | ; RV32IF-NEXT:  # %bb.9: | 
 | ; RV32IF-NEXT:    or a0, a2, a4 | 
 | ; RV32IF-NEXT:    snez a1, a0 | 
 | ; RV32IF-NEXT:  .LBB20_10: # %entry | 
 | ; RV32IF-NEXT:    neg a1, a1 | 
 | ; RV32IF-NEXT:    and a0, a1, a2 | 
 | ; RV32IF-NEXT:    and a1, a1, a4 | 
 | ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f64i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixdfti | 
 | ; RV64-NEXT:    slti a2, a1, 1 | 
 | ; RV64-NEXT:    blez a1, .LBB20_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a1, 1 | 
 | ; RV64-NEXT:  .LBB20_2: # %entry | 
 | ; RV64-NEXT:    neg a2, a2 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    beqz a1, .LBB20_4 | 
 | ; RV64-NEXT:  # %bb.3: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a1 | 
 | ; RV64-NEXT:    j .LBB20_5 | 
 | ; RV64-NEXT:  .LBB20_4: | 
 | ; RV64-NEXT:    snez a1, a0 | 
 | ; RV64-NEXT:  .LBB20_5: # %entry | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: ustest_f64i64: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    addi sp, sp, -32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IFD-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IFD-NEXT:    addi a0, sp, 8 | 
 | ; RV32IFD-NEXT:    call __fixdfti | 
 | ; RV32IFD-NEXT:    lw a1, 20(sp) | 
 | ; RV32IFD-NEXT:    lw a0, 16(sp) | 
 | ; RV32IFD-NEXT:    beqz a1, .LBB20_2 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    slti a2, a1, 0 | 
 | ; RV32IFD-NEXT:    j .LBB20_3 | 
 | ; RV32IFD-NEXT:  .LBB20_2: | 
 | ; RV32IFD-NEXT:    seqz a2, a0 | 
 | ; RV32IFD-NEXT:  .LBB20_3: # %entry | 
 | ; RV32IFD-NEXT:    xori a3, a0, 1 | 
 | ; RV32IFD-NEXT:    or a3, a3, a1 | 
 | ; RV32IFD-NEXT:    seqz a3, a3 | 
 | ; RV32IFD-NEXT:    addi a3, a3, -1 | 
 | ; RV32IFD-NEXT:    and a3, a3, a2 | 
 | ; RV32IFD-NEXT:    neg a2, a3 | 
 | ; RV32IFD-NEXT:    bnez a3, .LBB20_5 | 
 | ; RV32IFD-NEXT:  # %bb.4: # %entry | 
 | ; RV32IFD-NEXT:    li a0, 1 | 
 | ; RV32IFD-NEXT:  .LBB20_5: # %entry | 
 | ; RV32IFD-NEXT:    lw a3, 8(sp) | 
 | ; RV32IFD-NEXT:    lw a4, 12(sp) | 
 | ; RV32IFD-NEXT:    and a5, a2, a1 | 
 | ; RV32IFD-NEXT:    beqz a5, .LBB20_7 | 
 | ; RV32IFD-NEXT:  # %bb.6: # %entry | 
 | ; RV32IFD-NEXT:    sgtz a1, a5 | 
 | ; RV32IFD-NEXT:    j .LBB20_8 | 
 | ; RV32IFD-NEXT:  .LBB20_7: | 
 | ; RV32IFD-NEXT:    snez a1, a0 | 
 | ; RV32IFD-NEXT:  .LBB20_8: # %entry | 
 | ; RV32IFD-NEXT:    and a4, a2, a4 | 
 | ; RV32IFD-NEXT:    or a0, a0, a5 | 
 | ; RV32IFD-NEXT:    and a2, a2, a3 | 
 | ; RV32IFD-NEXT:    bnez a0, .LBB20_10 | 
 | ; RV32IFD-NEXT:  # %bb.9: | 
 | ; RV32IFD-NEXT:    or a0, a2, a4 | 
 | ; RV32IFD-NEXT:    snez a1, a0 | 
 | ; RV32IFD-NEXT:  .LBB20_10: # %entry | 
 | ; RV32IFD-NEXT:    neg a1, a1 | 
 | ; RV32IFD-NEXT:    and a0, a1, a2 | 
 | ; RV32IFD-NEXT:    and a1, a1, a4 | 
 | ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IFD-NEXT:    .cfi_restore ra | 
 | ; RV32IFD-NEXT:    addi sp, sp, 32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i128 | 
 |   %0 = icmp slt i128 %conv, 18446744073709551616 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 | 
 |   %1 = icmp sgt i128 %spec.store.select, 0 | 
 |   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
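|  |
| ; A note on the magic numbers in this i64-saturate group: 18446744073709551616 |
| ; is 2^64, and 9223372036854775807 / -9223372036854775808 are 2^63-1 / -2^63. |
| ; Read as arithmetic, ustest computes trunc(clamp(%conv, 0, 2^64)) and stest |
| ; computes trunc(clamp(%conv, -2^63, 2^63-1)); this is a reading of the |
| ; icmp/select patterns, not a claim about how the backend matches them. |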
 |  | 
 | define i64 @stest_f32i64(float %x) { | 
 | ; RV32-LABEL: stest_f32i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a1, 12(sp) | 
 | ; RV32-NEXT:    lw a2, 16(sp) | 
 | ; RV32-NEXT:    lw a4, 20(sp) | 
 | ; RV32-NEXT:    lui a0, 524288 | 
 | ; RV32-NEXT:    addi a5, a0, -1 | 
 | ; RV32-NEXT:    beq a1, a5, .LBB21_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    sltu a6, a1, a5 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    bnez a7, .LBB21_3 | 
 | ; RV32-NEXT:    j .LBB21_4 | 
 | ; RV32-NEXT:  .LBB21_2: | 
 | ; RV32-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    beqz a7, .LBB21_4 | 
 | ; RV32-NEXT:  .LBB21_3: # %entry | 
 | ; RV32-NEXT:    slti a6, a4, 0 | 
 | ; RV32-NEXT:  .LBB21_4: # %entry | 
 | ; RV32-NEXT:    addi a7, a6, -1 | 
 | ; RV32-NEXT:    neg t0, a6 | 
 | ; RV32-NEXT:    bnez a6, .LBB21_6 | 
 | ; RV32-NEXT:  # %bb.5: # %entry | 
 | ; RV32-NEXT:    mv a1, a5 | 
 | ; RV32-NEXT:  .LBB21_6: # %entry | 
 | ; RV32-NEXT:    or a3, a7, a3 | 
 | ; RV32-NEXT:    and a4, t0, a4 | 
 | ; RV32-NEXT:    and a2, t0, a2 | 
 | ; RV32-NEXT:    beq a1, a0, .LBB21_8 | 
 | ; RV32-NEXT:  # %bb.7: # %entry | 
 | ; RV32-NEXT:    sltu a0, a0, a1 | 
 | ; RV32-NEXT:    j .LBB21_9 | 
 | ; RV32-NEXT:  .LBB21_8: | 
 | ; RV32-NEXT:    snez a0, a3 | 
 | ; RV32-NEXT:  .LBB21_9: # %entry | 
 | ; RV32-NEXT:    and a2, a2, a4 | 
 | ; RV32-NEXT:    li a5, -1 | 
 | ; RV32-NEXT:    beq a2, a5, .LBB21_11 | 
 | ; RV32-NEXT:  # %bb.10: # %entry | 
 | ; RV32-NEXT:    slti a0, a4, 0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:  .LBB21_11: # %entry | 
 | ; RV32-NEXT:    bnez a0, .LBB21_13 | 
 | ; RV32-NEXT:  # %bb.12: # %entry | 
 | ; RV32-NEXT:    lui a1, 524288 | 
 | ; RV32-NEXT:  .LBB21_13: # %entry | 
 | ; RV32-NEXT:    neg a0, a0 | 
 | ; RV32-NEXT:    and a0, a0, a3 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f32i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    feq.s a1, fa0, fa0 | 
 | ; RV64-NEXT:    seqz a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i128 | 
 |   %0 = icmp slt i128 %conv, 9223372036854775807 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 | 
 |   %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 | 
 |   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
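|  |
| ; In the RV64 block above, fcvt.l.s with rtz already clamps out-of-range |
| ; inputs to the i64 min/max (per the RISC-V F spec for invalid conversions), |
| ; so only NaN needs fixing: feq.s a1, fa0, fa0 is 0 exactly when fa0 is NaN, |
| ; and the seqz/addi/and tail turns that into a mask that zeroes the result |
| ; for NaN and passes it through otherwise. |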
 |  | 
 | define i64 @utest_f32i64(float %x) { | 
 | ; RV32-LABEL: utest_f32i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixunssfti | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    or a4, a1, a0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:    seqz a4, a4 | 
 | ; RV32-NEXT:    or a0, a0, a1 | 
 | ; RV32-NEXT:    seqz a0, a0 | 
 | ; RV32-NEXT:    addi a0, a0, -1 | 
 | ; RV32-NEXT:    and a0, a0, a4 | 
 | ; RV32-NEXT:    neg a1, a0 | 
 | ; RV32-NEXT:    and a0, a1, a3 | 
 | ; RV32-NEXT:    and a1, a1, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utest_f32i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixunssfti | 
 | ; RV64-NEXT:    snez a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui float %x to i128 | 
 |   %0 = icmp ult i128 %conv, 18446744073709551616 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 | 
 |   %conv6 = trunc i128 %spec.store.select to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @ustest_f32i64(float %x) { | 
 | ; RV32-LABEL: ustest_f32i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    beqz a1, .LBB23_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    slti a2, a1, 0 | 
 | ; RV32-NEXT:    j .LBB23_3 | 
 | ; RV32-NEXT:  .LBB23_2: | 
 | ; RV32-NEXT:    seqz a2, a0 | 
 | ; RV32-NEXT:  .LBB23_3: # %entry | 
 | ; RV32-NEXT:    xori a3, a0, 1 | 
 | ; RV32-NEXT:    or a3, a3, a1 | 
 | ; RV32-NEXT:    seqz a3, a3 | 
 | ; RV32-NEXT:    addi a3, a3, -1 | 
 | ; RV32-NEXT:    and a3, a3, a2 | 
 | ; RV32-NEXT:    neg a2, a3 | 
 | ; RV32-NEXT:    bnez a3, .LBB23_5 | 
 | ; RV32-NEXT:  # %bb.4: # %entry | 
 | ; RV32-NEXT:    li a0, 1 | 
 | ; RV32-NEXT:  .LBB23_5: # %entry | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a4, 12(sp) | 
 | ; RV32-NEXT:    and a5, a2, a1 | 
 | ; RV32-NEXT:    beqz a5, .LBB23_7 | 
 | ; RV32-NEXT:  # %bb.6: # %entry | 
 | ; RV32-NEXT:    sgtz a1, a5 | 
 | ; RV32-NEXT:    j .LBB23_8 | 
 | ; RV32-NEXT:  .LBB23_7: | 
 | ; RV32-NEXT:    snez a1, a0 | 
 | ; RV32-NEXT:  .LBB23_8: # %entry | 
 | ; RV32-NEXT:    and a4, a2, a4 | 
 | ; RV32-NEXT:    or a0, a0, a5 | 
 | ; RV32-NEXT:    and a2, a2, a3 | 
 | ; RV32-NEXT:    bnez a0, .LBB23_10 | 
 | ; RV32-NEXT:  # %bb.9: | 
 | ; RV32-NEXT:    or a0, a2, a4 | 
 | ; RV32-NEXT:    snez a1, a0 | 
 | ; RV32-NEXT:  .LBB23_10: # %entry | 
 | ; RV32-NEXT:    neg a1, a1 | 
 | ; RV32-NEXT:    and a0, a1, a2 | 
 | ; RV32-NEXT:    and a1, a1, a4 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f32i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    slti a2, a1, 1 | 
 | ; RV64-NEXT:    blez a1, .LBB23_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a1, 1 | 
 | ; RV64-NEXT:  .LBB23_2: # %entry | 
 | ; RV64-NEXT:    neg a2, a2 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    beqz a1, .LBB23_4 | 
 | ; RV64-NEXT:  # %bb.3: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a1 | 
 | ; RV64-NEXT:    j .LBB23_5 | 
 | ; RV64-NEXT:  .LBB23_4: | 
 | ; RV64-NEXT:    snez a1, a0 | 
 | ; RV64-NEXT:  .LBB23_5: # %entry | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i128 | 
 |   %0 = icmp slt i128 %conv, 18446744073709551616 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 | 
 |   %1 = icmp sgt i128 %spec.store.select, 0 | 
 |   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @stest_f16i64(half %x) { | 
 | ; RV32-LABEL: stest_f16i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a1, 12(sp) | 
 | ; RV32-NEXT:    lw a2, 16(sp) | 
 | ; RV32-NEXT:    lw a4, 20(sp) | 
 | ; RV32-NEXT:    lui a0, 524288 | 
 | ; RV32-NEXT:    addi a5, a0, -1 | 
 | ; RV32-NEXT:    beq a1, a5, .LBB24_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    sltu a6, a1, a5 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    bnez a7, .LBB24_3 | 
 | ; RV32-NEXT:    j .LBB24_4 | 
 | ; RV32-NEXT:  .LBB24_2: | 
 | ; RV32-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    beqz a7, .LBB24_4 | 
 | ; RV32-NEXT:  .LBB24_3: # %entry | 
 | ; RV32-NEXT:    slti a6, a4, 0 | 
 | ; RV32-NEXT:  .LBB24_4: # %entry | 
 | ; RV32-NEXT:    addi a7, a6, -1 | 
 | ; RV32-NEXT:    neg t0, a6 | 
 | ; RV32-NEXT:    bnez a6, .LBB24_6 | 
 | ; RV32-NEXT:  # %bb.5: # %entry | 
 | ; RV32-NEXT:    mv a1, a5 | 
 | ; RV32-NEXT:  .LBB24_6: # %entry | 
 | ; RV32-NEXT:    or a3, a7, a3 | 
 | ; RV32-NEXT:    and a4, t0, a4 | 
 | ; RV32-NEXT:    and a2, t0, a2 | 
 | ; RV32-NEXT:    beq a1, a0, .LBB24_8 | 
 | ; RV32-NEXT:  # %bb.7: # %entry | 
 | ; RV32-NEXT:    sltu a0, a0, a1 | 
 | ; RV32-NEXT:    j .LBB24_9 | 
 | ; RV32-NEXT:  .LBB24_8: | 
 | ; RV32-NEXT:    snez a0, a3 | 
 | ; RV32-NEXT:  .LBB24_9: # %entry | 
 | ; RV32-NEXT:    and a2, a2, a4 | 
 | ; RV32-NEXT:    li a5, -1 | 
 | ; RV32-NEXT:    beq a2, a5, .LBB24_11 | 
 | ; RV32-NEXT:  # %bb.10: # %entry | 
 | ; RV32-NEXT:    slti a0, a4, 0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:  .LBB24_11: # %entry | 
 | ; RV32-NEXT:    bnez a0, .LBB24_13 | 
 | ; RV32-NEXT:  # %bb.12: # %entry | 
 | ; RV32-NEXT:    lui a1, 524288 | 
 | ; RV32-NEXT:  .LBB24_13: # %entry | 
 | ; RV32-NEXT:    neg a0, a0 | 
 | ; RV32-NEXT:    and a0, a0, a3 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f16i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    li a2, -1 | 
 | ; RV64-NEXT:    srli a3, a2, 1 | 
 | ; RV64-NEXT:    beqz a1, .LBB24_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    slti a4, a1, 0 | 
 | ; RV64-NEXT:    j .LBB24_3 | 
 | ; RV64-NEXT:  .LBB24_2: | 
 | ; RV64-NEXT:    sltu a4, a0, a3 | 
 | ; RV64-NEXT:  .LBB24_3: # %entry | 
 | ; RV64-NEXT:    neg a5, a4 | 
 | ; RV64-NEXT:    and a5, a5, a1 | 
 | ; RV64-NEXT:    bnez a4, .LBB24_5 | 
 | ; RV64-NEXT:  # %bb.4: # %entry | 
 | ; RV64-NEXT:    mv a0, a3 | 
 | ; RV64-NEXT:  .LBB24_5: # %entry | 
 | ; RV64-NEXT:    slli a1, a2, 63 | 
 | ; RV64-NEXT:    beq a5, a2, .LBB24_7 | 
 | ; RV64-NEXT:  # %bb.6: # %entry | 
 | ; RV64-NEXT:    slti a2, a5, 0 | 
 | ; RV64-NEXT:    xori a2, a2, 1 | 
 | ; RV64-NEXT:    beqz a2, .LBB24_8 | 
 | ; RV64-NEXT:    j .LBB24_9 | 
 | ; RV64-NEXT:  .LBB24_7: | 
 | ; RV64-NEXT:    sltu a2, a1, a0 | 
 | ; RV64-NEXT:    bnez a2, .LBB24_9 | 
 | ; RV64-NEXT:  .LBB24_8: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB24_9: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i128 | 
 |   %0 = icmp slt i128 %conv, 9223372036854775807 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 | 
 |   %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 | 
 |   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
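|  |
| ; With only +f/+d there is no native fp16 support, so the half tests first |
| ; widen to float through the compiler-rt __extendhfsf2 libcall and then |
| ; follow the same conversion and clamp paths as the float tests above. |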
 |  | 
 | define i64 @utesth_f16i64(half %x) { | 
 | ; RV32-LABEL: utesth_f16i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixunssfti | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    or a4, a1, a0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:    seqz a4, a4 | 
 | ; RV32-NEXT:    or a0, a0, a1 | 
 | ; RV32-NEXT:    seqz a0, a0 | 
 | ; RV32-NEXT:    addi a0, a0, -1 | 
 | ; RV32-NEXT:    and a0, a0, a4 | 
 | ; RV32-NEXT:    neg a1, a0 | 
 | ; RV32-NEXT:    and a0, a1, a3 | 
 | ; RV32-NEXT:    and a1, a1, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utesth_f16i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixunssfti | 
 | ; RV64-NEXT:    snez a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui half %x to i128 | 
 |   %0 = icmp ult i128 %conv, 18446744073709551616 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 | 
 |   %conv6 = trunc i128 %spec.store.select to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @ustest_f16i64(half %x) { | 
 | ; RV32-LABEL: ustest_f16i64: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    beqz a1, .LBB26_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    slti a2, a1, 0 | 
 | ; RV32-NEXT:    j .LBB26_3 | 
 | ; RV32-NEXT:  .LBB26_2: | 
 | ; RV32-NEXT:    seqz a2, a0 | 
 | ; RV32-NEXT:  .LBB26_3: # %entry | 
 | ; RV32-NEXT:    xori a3, a0, 1 | 
 | ; RV32-NEXT:    or a3, a3, a1 | 
 | ; RV32-NEXT:    seqz a3, a3 | 
 | ; RV32-NEXT:    addi a3, a3, -1 | 
 | ; RV32-NEXT:    and a3, a3, a2 | 
 | ; RV32-NEXT:    neg a2, a3 | 
 | ; RV32-NEXT:    bnez a3, .LBB26_5 | 
 | ; RV32-NEXT:  # %bb.4: # %entry | 
 | ; RV32-NEXT:    li a0, 1 | 
 | ; RV32-NEXT:  .LBB26_5: # %entry | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a4, 12(sp) | 
 | ; RV32-NEXT:    and a5, a2, a1 | 
 | ; RV32-NEXT:    beqz a5, .LBB26_7 | 
 | ; RV32-NEXT:  # %bb.6: # %entry | 
 | ; RV32-NEXT:    sgtz a1, a5 | 
 | ; RV32-NEXT:    j .LBB26_8 | 
 | ; RV32-NEXT:  .LBB26_7: | 
 | ; RV32-NEXT:    snez a1, a0 | 
 | ; RV32-NEXT:  .LBB26_8: # %entry | 
 | ; RV32-NEXT:    and a4, a2, a4 | 
 | ; RV32-NEXT:    or a0, a0, a5 | 
 | ; RV32-NEXT:    and a2, a2, a3 | 
 | ; RV32-NEXT:    bnez a0, .LBB26_10 | 
 | ; RV32-NEXT:  # %bb.9: | 
 | ; RV32-NEXT:    or a0, a2, a4 | 
 | ; RV32-NEXT:    snez a1, a0 | 
 | ; RV32-NEXT:  .LBB26_10: # %entry | 
 | ; RV32-NEXT:    neg a1, a1 | 
 | ; RV32-NEXT:    and a0, a1, a2 | 
 | ; RV32-NEXT:    and a1, a1, a4 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f16i64: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    slti a2, a1, 1 | 
 | ; RV64-NEXT:    blez a1, .LBB26_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a1, 1 | 
 | ; RV64-NEXT:  .LBB26_2: # %entry | 
 | ; RV64-NEXT:    neg a2, a2 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    beqz a1, .LBB26_4 | 
 | ; RV64-NEXT:  # %bb.3: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a1 | 
 | ; RV64-NEXT:    j .LBB26_5 | 
 | ; RV64-NEXT:  .LBB26_4: | 
 | ; RV64-NEXT:    snez a1, a0 | 
 | ; RV64-NEXT:  .LBB26_5: # %entry | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i128 | 
 |   %0 = icmp slt i128 %conv, 18446744073709551616 | 
 |   %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 | 
 |   %1 = icmp sgt i128 %spec.store.select, 0 | 
 |   %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
|  |
| ; i32 saturate (min/max intrinsic variants) |
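| ; These _mm variants express the same clamps with the llvm.smin/llvm.smax/ |
| ; llvm.umin intrinsics instead of icmp+select pairs. A minimal sketch of the |
| ; shape being tested (not itself one of the checked functions): |
| ; |
| ;   %min = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) |
| ;   %clamped = call i64 @llvm.smax.i64(i64 %min, i64 -2147483648) |
| ;   %res = trunc i64 %clamped to i32 |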
 |  | 
 | define i32 @stest_f64i32_mm(double %x) { | 
 | ; RV32IF-LABEL: stest_f64i32_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixdfdi | 
 | ; RV32IF-NEXT:    lui a2, 524288 | 
 | ; RV32IF-NEXT:    addi a3, a2, -1 | 
 | ; RV32IF-NEXT:    beqz a1, .LBB27_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    slti a4, a1, 0 | 
 | ; RV32IF-NEXT:    j .LBB27_3 | 
 | ; RV32IF-NEXT:  .LBB27_2: | 
 | ; RV32IF-NEXT:    sltu a4, a0, a3 | 
 | ; RV32IF-NEXT:  .LBB27_3: # %entry | 
 | ; RV32IF-NEXT:    neg a5, a4 | 
 | ; RV32IF-NEXT:    and a1, a5, a1 | 
 | ; RV32IF-NEXT:    bnez a4, .LBB27_5 | 
 | ; RV32IF-NEXT:  # %bb.4: # %entry | 
 | ; RV32IF-NEXT:    mv a0, a3 | 
 | ; RV32IF-NEXT:  .LBB27_5: # %entry | 
 | ; RV32IF-NEXT:    li a3, -1 | 
 | ; RV32IF-NEXT:    beq a1, a3, .LBB27_7 | 
 | ; RV32IF-NEXT:  # %bb.6: # %entry | 
 | ; RV32IF-NEXT:    slti a1, a1, 0 | 
 | ; RV32IF-NEXT:    xori a1, a1, 1 | 
 | ; RV32IF-NEXT:    beqz a1, .LBB27_8 | 
 | ; RV32IF-NEXT:    j .LBB27_9 | 
 | ; RV32IF-NEXT:  .LBB27_7: | 
 | ; RV32IF-NEXT:    sltu a1, a2, a0 | 
 | ; RV32IF-NEXT:    bnez a1, .LBB27_9 | 
 | ; RV32IF-NEXT:  .LBB27_8: # %entry | 
 | ; RV32IF-NEXT:    lui a0, 524288 | 
 | ; RV32IF-NEXT:  .LBB27_9: # %entry | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: stest_f64i32_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixdfdi | 
 | ; RV64IF-NEXT:    lui a1, 524288 | 
 | ; RV64IF-NEXT:    addiw a2, a1, -1 | 
 | ; RV64IF-NEXT:    blt a0, a2, .LBB27_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a2 | 
 | ; RV64IF-NEXT:  .LBB27_2: # %entry | 
 | ; RV64IF-NEXT:    blt a1, a0, .LBB27_4 | 
 | ; RV64IF-NEXT:  # %bb.3: # %entry | 
 | ; RV64IF-NEXT:    lui a0, 524288 | 
 | ; RV64IF-NEXT:  .LBB27_4: # %entry | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: stest_f64i32_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    feq.d a1, fa0, fa0 | 
 | ; RV32IFD-NEXT:    seqz a1, a1 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    and a0, a1, a0 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: stest_f64i32_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    lui a1, 524288 | 
 | ; RV64IFD-NEXT:    addiw a2, a1, -1 | 
 | ; RV64IFD-NEXT:    bge a0, a2, .LBB27_3 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    bge a1, a0, .LBB27_4 | 
 | ; RV64IFD-NEXT:  .LBB27_2: # %entry | 
 | ; RV64IFD-NEXT:    ret | 
 | ; RV64IFD-NEXT:  .LBB27_3: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a2 | 
 | ; RV64IFD-NEXT:    blt a1, a2, .LBB27_2 | 
 | ; RV64IFD-NEXT:  .LBB27_4: # %entry | 
 | ; RV64IFD-NEXT:    lui a0, 524288 | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @utest_f64i32_mm(double %x) { | 
 | ; RV32IF-LABEL: utest_f64i32_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixunsdfdi | 
 | ; RV32IF-NEXT:    seqz a1, a1 | 
 | ; RV32IF-NEXT:    addi a1, a1, -1 | 
 | ; RV32IF-NEXT:    or a0, a1, a0 | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: utest_f64i32_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixunsdfdi | 
 | ; RV64IF-NEXT:    li a1, -1 | 
 | ; RV64IF-NEXT:    srli a1, a1, 32 | 
 | ; RV64IF-NEXT:    bltu a0, a1, .LBB28_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB28_2: # %entry | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: utest_f64i32_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    feq.d a1, fa0, fa0 | 
 | ; RV32IFD-NEXT:    seqz a1, a1 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    and a0, a1, a0 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: utest_f64i32_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    li a1, -1 | 
 | ; RV64IFD-NEXT:    srli a1, a1, 32 | 
 | ; RV64IFD-NEXT:    bltu a0, a1, .LBB28_2 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a1 | 
 | ; RV64IFD-NEXT:  .LBB28_2: # %entry | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui double %x to i64 | 
 |   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) | 
 |   %conv6 = trunc i64 %spec.store.select to i32 | 
 |   ret i32 %conv6 | 
 | } | 
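|  |
| ; In the RV64 blocks above, li a1, -1 ; srli a1, a1, 32 materializes |
| ; 2^32 - 1 = 4294967295, the unsigned-i32 bound, in two instructions. |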
 |  | 
 | define i32 @ustest_f64i32_mm(double %x) { | 
 | ; RV32IF-LABEL: ustest_f64i32_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixdfdi | 
 | ; RV32IF-NEXT:    bnez a1, .LBB29_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    li a2, 1 | 
 | ; RV32IF-NEXT:    j .LBB29_3 | 
 | ; RV32IF-NEXT:  .LBB29_2: | 
 | ; RV32IF-NEXT:    slti a2, a1, 1 | 
 | ; RV32IF-NEXT:  .LBB29_3: # %entry | 
 | ; RV32IF-NEXT:    addi a3, a2, -1 | 
 | ; RV32IF-NEXT:    neg a2, a2 | 
 | ; RV32IF-NEXT:    or a0, a3, a0 | 
 | ; RV32IF-NEXT:    and a1, a2, a1 | 
 | ; RV32IF-NEXT:    slti a1, a1, 0 | 
 | ; RV32IF-NEXT:    addi a1, a1, -1 | 
 | ; RV32IF-NEXT:    and a0, a1, a0 | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: ustest_f64i32_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixdfdi | 
 | ; RV64IF-NEXT:    li a1, -1 | 
 | ; RV64IF-NEXT:    srli a1, a1, 32 | 
 | ; RV64IF-NEXT:    blt a0, a1, .LBB29_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB29_2: # %entry | 
 | ; RV64IF-NEXT:    sgtz a1, a0 | 
 | ; RV64IF-NEXT:    neg a1, a1 | 
 | ; RV64IF-NEXT:    and a0, a1, a0 | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: ustest_f64i32_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    feq.d a1, fa0, fa0 | 
 | ; RV32IFD-NEXT:    seqz a1, a1 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    and a0, a1, a0 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: ustest_f64i32_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    li a1, -1 | 
 | ; RV64IFD-NEXT:    srli a1, a1, 32 | 
 | ; RV64IFD-NEXT:    blt a0, a1, .LBB29_2 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a1 | 
 | ; RV64IFD-NEXT:  .LBB29_2: # %entry | 
 | ; RV64IFD-NEXT:    sgtz a1, a0 | 
 | ; RV64IFD-NEXT:    neg a1, a1 | 
 | ; RV64IFD-NEXT:    and a0, a1, a0 | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
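|  |
| ; The sgtz/neg/and tail above is a branchless max(a0, 0): sgtz writes 1 only |
| ; when a0 is positive, neg turns that into an all-ones (or zero) mask, and |
| ; the and keeps a0 for positive values and yields 0 otherwise. |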
 |  | 
 | define i32 @stest_f32i32_mm(float %x) { | 
 | ; RV32-LABEL: stest_f32i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV32-NEXT:    feq.s a1, fa0, fa0 | 
 | ; RV32-NEXT:    seqz a1, a1 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f32i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 524288 | 
 | ; RV64-NEXT:    addiw a2, a1, -1 | 
 | ; RV64-NEXT:    bge a0, a2, .LBB30_3 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    bge a1, a0, .LBB30_4 | 
 | ; RV64-NEXT:  .LBB30_2: # %entry | 
 | ; RV64-NEXT:    ret | 
 | ; RV64-NEXT:  .LBB30_3: # %entry | 
 | ; RV64-NEXT:    mv a0, a2 | 
 | ; RV64-NEXT:    blt a1, a2, .LBB30_2 | 
 | ; RV64-NEXT:  .LBB30_4: # %entry | 
 | ; RV64-NEXT:    lui a0, 524288 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @utest_f32i32_mm(float %x) { | 
 | ; RV32-LABEL: utest_f32i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz | 
 | ; RV32-NEXT:    feq.s a1, fa0, fa0 | 
 | ; RV32-NEXT:    seqz a1, a1 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utest_f32i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz | 
 | ; RV64-NEXT:    li a1, -1 | 
 | ; RV64-NEXT:    srli a1, a1, 32 | 
 | ; RV64-NEXT:    bltu a0, a1, .LBB31_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB31_2: # %entry | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui float %x to i64 | 
 |   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) | 
 |   %conv6 = trunc i64 %spec.store.select to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @ustest_f32i32_mm(float %x) { | 
 | ; RV32-LABEL: ustest_f32i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz | 
 | ; RV32-NEXT:    feq.s a1, fa0, fa0 | 
 | ; RV32-NEXT:    seqz a1, a1 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f32i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    li a1, -1 | 
 | ; RV64-NEXT:    srli a1, a1, 32 | 
 | ; RV64-NEXT:    blt a0, a1, .LBB32_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB32_2: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @stest_f16i32_mm(half %x) { | 
 | ; RV32-LABEL: stest_f16i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    call __fixsfdi | 
 | ; RV32-NEXT:    lui a2, 524288 | 
 | ; RV32-NEXT:    addi a3, a2, -1 | 
 | ; RV32-NEXT:    beqz a1, .LBB33_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    slti a4, a1, 0 | 
 | ; RV32-NEXT:    j .LBB33_3 | 
 | ; RV32-NEXT:  .LBB33_2: | 
 | ; RV32-NEXT:    sltu a4, a0, a3 | 
 | ; RV32-NEXT:  .LBB33_3: # %entry | 
 | ; RV32-NEXT:    neg a5, a4 | 
 | ; RV32-NEXT:    and a1, a5, a1 | 
 | ; RV32-NEXT:    bnez a4, .LBB33_5 | 
 | ; RV32-NEXT:  # %bb.4: # %entry | 
 | ; RV32-NEXT:    mv a0, a3 | 
 | ; RV32-NEXT:  .LBB33_5: # %entry | 
 | ; RV32-NEXT:    li a3, -1 | 
 | ; RV32-NEXT:    beq a1, a3, .LBB33_7 | 
 | ; RV32-NEXT:  # %bb.6: # %entry | 
 | ; RV32-NEXT:    slti a1, a1, 0 | 
 | ; RV32-NEXT:    xori a1, a1, 1 | 
 | ; RV32-NEXT:    beqz a1, .LBB33_8 | 
 | ; RV32-NEXT:    j .LBB33_9 | 
 | ; RV32-NEXT:  .LBB33_7: | 
 | ; RV32-NEXT:    sltu a1, a2, a0 | 
 | ; RV32-NEXT:    bnez a1, .LBB33_9 | 
 | ; RV32-NEXT:  .LBB33_8: # %entry | 
 | ; RV32-NEXT:    lui a0, 524288 | 
 | ; RV32-NEXT:  .LBB33_9: # %entry | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f16i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 524288 | 
 | ; RV64-NEXT:    addiw a2, a1, -1 | 
 | ; RV64-NEXT:    blt a0, a2, .LBB33_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a2 | 
 | ; RV64-NEXT:  .LBB33_2: # %entry | 
 | ; RV64-NEXT:    blt a1, a0, .LBB33_4 | 
 | ; RV64-NEXT:  # %bb.3: # %entry | 
 | ; RV64-NEXT:    lui a0, 524288 | 
 | ; RV64-NEXT:  .LBB33_4: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @utesth_f16i32_mm(half %x) { | 
 | ; RV32-LABEL: utesth_f16i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    call __fixunssfdi | 
 | ; RV32-NEXT:    seqz a1, a1 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    or a0, a1, a0 | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utesth_f16i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz | 
 | ; RV64-NEXT:    li a1, -1 | 
 | ; RV64-NEXT:    srli a1, a1, 32 | 
 | ; RV64-NEXT:    bltu a0, a1, .LBB34_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB34_2: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui half %x to i64 | 
 |   %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) | 
 |   %conv6 = trunc i64 %spec.store.select to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
 | define i32 @ustest_f16i32_mm(half %x) { | 
 | ; RV32-LABEL: ustest_f16i32_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    call __fixsfdi | 
 | ; RV32-NEXT:    bnez a1, .LBB35_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    li a2, 1 | 
 | ; RV32-NEXT:    j .LBB35_3 | 
 | ; RV32-NEXT:  .LBB35_2: | 
 | ; RV32-NEXT:    slti a2, a1, 1 | 
 | ; RV32-NEXT:  .LBB35_3: # %entry | 
 | ; RV32-NEXT:    addi a3, a2, -1 | 
 | ; RV32-NEXT:    neg a2, a2 | 
 | ; RV32-NEXT:    or a0, a3, a0 | 
 | ; RV32-NEXT:    and a1, a2, a1 | 
 | ; RV32-NEXT:    slti a1, a1, 0 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f16i32_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    li a1, -1 | 
 | ; RV64-NEXT:    srli a1, a1, 32 | 
 | ; RV64-NEXT:    blt a0, a1, .LBB35_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB35_2: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i64 | 
 |   %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) | 
 |   %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) | 
 |   %conv6 = trunc i64 %spec.store.select7 to i32 | 
 |   ret i32 %conv6 | 
 | } | 
 |  | 
| ; i16 saturate (min/max intrinsic variants) |
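| ; In the blocks below, lui a1, 8 ; addi(w) a1, a1, -1 materializes 32767 |
| ; (0x8000 - 1) and lui 1048568 materializes 0xffff8000, i.e. -32768 as a |
| ; sign-extended 32-bit value: the signed i16 bounds. |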
 |  | 
 | define i16 @stest_f64i16_mm(double %x) { | 
 | ; RV32IF-LABEL: stest_f64i16_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixdfsi | 
 | ; RV32IF-NEXT:    lui a1, 8 | 
 | ; RV32IF-NEXT:    addi a1, a1, -1 | 
 | ; RV32IF-NEXT:    blt a0, a1, .LBB36_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    mv a0, a1 | 
 | ; RV32IF-NEXT:  .LBB36_2: # %entry | 
 | ; RV32IF-NEXT:    lui a1, 1048568 | 
 | ; RV32IF-NEXT:    blt a1, a0, .LBB36_4 | 
 | ; RV32IF-NEXT:  # %bb.3: # %entry | 
 | ; RV32IF-NEXT:    lui a0, 1048568 | 
 | ; RV32IF-NEXT:  .LBB36_4: # %entry | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: stest_f64i16_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixdfsi | 
 | ; RV64IF-NEXT:    lui a1, 8 | 
 | ; RV64IF-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IF-NEXT:    blt a0, a1, .LBB36_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB36_2: # %entry | 
 | ; RV64IF-NEXT:    lui a1, 1048568 | 
 | ; RV64IF-NEXT:    blt a1, a0, .LBB36_4 | 
 | ; RV64IF-NEXT:  # %bb.3: # %entry | 
 | ; RV64IF-NEXT:    lui a0, 1048568 | 
 | ; RV64IF-NEXT:  .LBB36_4: # %entry | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: stest_f64i16_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    lui a1, 8 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    bge a0, a1, .LBB36_3 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    lui a1, 1048568 | 
 | ; RV32IFD-NEXT:    bge a1, a0, .LBB36_4 | 
 | ; RV32IFD-NEXT:  .LBB36_2: # %entry | 
 | ; RV32IFD-NEXT:    ret | 
 | ; RV32IFD-NEXT:  .LBB36_3: # %entry | 
 | ; RV32IFD-NEXT:    mv a0, a1 | 
 | ; RV32IFD-NEXT:    lui a1, 1048568 | 
 | ; RV32IFD-NEXT:    blt a1, a0, .LBB36_2 | 
 | ; RV32IFD-NEXT:  .LBB36_4: # %entry | 
 | ; RV32IFD-NEXT:    lui a0, 1048568 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: stest_f64i16_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    lui a1, 8 | 
 | ; RV64IFD-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IFD-NEXT:    bge a0, a1, .LBB36_3 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    lui a1, 1048568 | 
 | ; RV64IFD-NEXT:    bge a1, a0, .LBB36_4 | 
 | ; RV64IFD-NEXT:  .LBB36_2: # %entry | 
 | ; RV64IFD-NEXT:    ret | 
 | ; RV64IFD-NEXT:  .LBB36_3: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a1 | 
 | ; RV64IFD-NEXT:    lui a1, 1048568 | 
 | ; RV64IFD-NEXT:    blt a1, a0, .LBB36_2 | 
 | ; RV64IFD-NEXT:  .LBB36_4: # %entry | 
 | ; RV64IFD-NEXT:    lui a0, 1048568 | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @utest_f64i16_mm(double %x) { | 
 | ; RV32IF-LABEL: utest_f64i16_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixunsdfsi | 
 | ; RV32IF-NEXT:    lui a1, 16 | 
 | ; RV32IF-NEXT:    addi a1, a1, -1 | 
 | ; RV32IF-NEXT:    bltu a0, a1, .LBB37_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    mv a0, a1 | 
 | ; RV32IF-NEXT:  .LBB37_2: # %entry | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: utest_f64i16_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixunsdfsi | 
 | ; RV64IF-NEXT:    lui a1, 16 | 
 | ; RV64IF-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IF-NEXT:    bltu a0, a1, .LBB37_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB37_2: # %entry | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: utest_f64i16_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    lui a1, 16 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    bltu a0, a1, .LBB37_2 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    mv a0, a1 | 
 | ; RV32IFD-NEXT:  .LBB37_2: # %entry | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: utest_f64i16_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    lui a1, 16 | 
 | ; RV64IFD-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IFD-NEXT:    bltu a0, a1, .LBB37_2 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a1 | 
 | ; RV64IFD-NEXT:  .LBB37_2: # %entry | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui double %x to i32 | 
 |   %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) | 
 |   %conv6 = trunc i32 %spec.store.select to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @ustest_f64i16_mm(double %x) { | 
 | ; RV32IF-LABEL: ustest_f64i16_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    call __fixdfsi | 
 | ; RV32IF-NEXT:    lui a1, 16 | 
 | ; RV32IF-NEXT:    addi a1, a1, -1 | 
 | ; RV32IF-NEXT:    blt a0, a1, .LBB38_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    mv a0, a1 | 
 | ; RV32IF-NEXT:  .LBB38_2: # %entry | 
 | ; RV32IF-NEXT:    sgtz a1, a0 | 
 | ; RV32IF-NEXT:    neg a1, a1 | 
 | ; RV32IF-NEXT:    and a0, a1, a0 | 
 | ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 16 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: ustest_f64i16_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixdfsi | 
 | ; RV64IF-NEXT:    lui a1, 16 | 
 | ; RV64IF-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IF-NEXT:    blt a0, a1, .LBB38_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB38_2: # %entry | 
 | ; RV64IF-NEXT:    sgtz a1, a0 | 
 | ; RV64IF-NEXT:    neg a1, a1 | 
 | ; RV64IF-NEXT:    and a0, a1, a0 | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: ustest_f64i16_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz | 
 | ; RV32IFD-NEXT:    lui a1, 16 | 
 | ; RV32IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV32IFD-NEXT:    blt a0, a1, .LBB38_2 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    mv a0, a1 | 
 | ; RV32IFD-NEXT:  .LBB38_2: # %entry | 
 | ; RV32IFD-NEXT:    sgtz a1, a0 | 
 | ; RV32IFD-NEXT:    neg a1, a1 | 
 | ; RV32IFD-NEXT:    and a0, a1, a0 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: ustest_f64i16_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    lui a1, 16 | 
 | ; RV64IFD-NEXT:    addiw a1, a1, -1 | 
 | ; RV64IFD-NEXT:    blt a0, a1, .LBB38_2 | 
 | ; RV64IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV64IFD-NEXT:    mv a0, a1 | 
 | ; RV64IFD-NEXT:  .LBB38_2: # %entry | 
 | ; RV64IFD-NEXT:    sgtz a1, a0 | 
 | ; RV64IFD-NEXT:    neg a1, a1 | 
 | ; RV64IFD-NEXT:    and a0, a1, a0 | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @stest_f32i16_mm(float %x) { | 
 | ; RV32-LABEL: stest_f32i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 8 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    bge a0, a1, .LBB39_3 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    lui a1, 1048568 | 
 | ; RV32-NEXT:    bge a1, a0, .LBB39_4 | 
 | ; RV32-NEXT:  .LBB39_2: # %entry | 
 | ; RV32-NEXT:    ret | 
 | ; RV32-NEXT:  .LBB39_3: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:    lui a1, 1048568 | 
 | ; RV32-NEXT:    blt a1, a0, .LBB39_2 | 
 | ; RV32-NEXT:  .LBB39_4: # %entry | 
 | ; RV32-NEXT:    lui a0, 1048568 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f32i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 8 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    bge a0, a1, .LBB39_3 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    lui a1, 1048568 | 
 | ; RV64-NEXT:    bge a1, a0, .LBB39_4 | 
 | ; RV64-NEXT:  .LBB39_2: # %entry | 
 | ; RV64-NEXT:    ret | 
 | ; RV64-NEXT:  .LBB39_3: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:    lui a1, 1048568 | 
 | ; RV64-NEXT:    blt a1, a0, .LBB39_2 | 
 | ; RV64-NEXT:  .LBB39_4: # %entry | 
 | ; RV64-NEXT:    lui a0, 1048568 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @utest_f32i16_mm(float %x) { | 
 | ; RV32-LABEL: utest_f32i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 16 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    bltu a0, a1, .LBB40_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:  .LBB40_2: # %entry | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utest_f32i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.wu.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 16 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    bltu a0, a1, .LBB40_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB40_2: # %entry | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui float %x to i32 | 
 |   %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) | 
 |   %conv6 = trunc i32 %spec.store.select to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @ustest_f32i16_mm(float %x) { | 
 | ; RV32-LABEL: ustest_f32i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 16 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    blt a0, a1, .LBB41_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:  .LBB41_2: # %entry | 
 | ; RV32-NEXT:    sgtz a1, a0 | 
 | ; RV32-NEXT:    neg a1, a1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f32i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 16 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    blt a0, a1, .LBB41_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB41_2: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @stest_f16i16_mm(half %x) { | 
 | ; RV32-LABEL: stest_f16i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 8 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    blt a0, a1, .LBB42_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:  .LBB42_2: # %entry | 
 | ; RV32-NEXT:    lui a1, 1048568 | 
 | ; RV32-NEXT:    blt a1, a0, .LBB42_4 | 
 | ; RV32-NEXT:  # %bb.3: # %entry | 
 | ; RV32-NEXT:    lui a0, 1048568 | 
 | ; RV32-NEXT:  .LBB42_4: # %entry | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f16i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 8 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    blt a0, a1, .LBB42_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB42_2: # %entry | 
 | ; RV64-NEXT:    lui a1, 1048568 | 
 | ; RV64-NEXT:    blt a1, a0, .LBB42_4 | 
 | ; RV64-NEXT:  # %bb.3: # %entry | 
 | ; RV64-NEXT:    lui a0, 1048568 | 
 | ; RV64-NEXT:  .LBB42_4: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @utesth_f16i16_mm(half %x) { | 
 | ; RV32-LABEL: utesth_f16i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 16 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    bltu a0, a1, .LBB43_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:  .LBB43_2: # %entry | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utesth_f16i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 16 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    bltu a0, a1, .LBB43_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB43_2: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui half %x to i32 | 
 |   %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) | 
 |   %conv6 = trunc i32 %spec.store.select to i16 | 
 |   ret i16 %conv6 | 
 | } | 
 |  | 
 | define i16 @ustest_f16i16_mm(half %x) { | 
 | ; RV32-LABEL: ustest_f16i16_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz | 
 | ; RV32-NEXT:    lui a1, 16 | 
 | ; RV32-NEXT:    addi a1, a1, -1 | 
 | ; RV32-NEXT:    blt a0, a1, .LBB44_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    mv a0, a1 | 
 | ; RV32-NEXT:  .LBB44_2: # %entry | 
 | ; RV32-NEXT:    sgtz a1, a0 | 
 | ; RV32-NEXT:    neg a1, a1 | 
 | ; RV32-NEXT:    and a0, a1, a0 | 
 | ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 16 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f16i16_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    lui a1, 16 | 
 | ; RV64-NEXT:    addiw a1, a1, -1 | 
 | ; RV64-NEXT:    blt a0, a1, .LBB44_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB44_2: # %entry | 
 | ; RV64-NEXT:    sgtz a1, a0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i32 | 
 |   %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) | 
 |   %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) | 
 |   %conv6 = trunc i32 %spec.store.select7 to i16 | 
 |   ret i16 %conv6 | 
 | } | 
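 |  | 
 | ; Note on the f16-to-i16 clamps above: the bounds are materialized with | 
 | ; lui/addi pairs (lui 8 + addi -1 = 32767, INT16_MAX; lui 1048568 | 
 | ; sign-extends to -32768, INT16_MIN; lui 16 + addi -1 = 65535, UINT16_MAX). | 
 | ; In ustest_f16i16_mm the smax(x, 0) lower clamp is lowered branchlessly, | 
 | ; with sgtz yielding a 0/1 flag, neg widening it to an all-zeros/all-ones | 
 | ; mask, and the final and keeping x or zeroing it. A minimal IR sketch of | 
 | ; that identity, kept in a comment so it does not disturb the checked | 
 | ; functions (%v is a hypothetical value already clamped from above): | 
 | ;   %pos  = icmp sgt i32 %v, 0 | 
 | ;   %mask = sext i1 %pos to i32        ; 0 when %v <= 0, -1 otherwise | 
 | ;   %res  = and i32 %mask, %v          ; equals smax(%v, 0) | 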
 |  | 
 | ; i64 saturate | 
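 | ; The _mm i64 tests clamp an i128 intermediate: the conversions go through | 
 | ; the compiler-rt libcalls __fixdfti/__fixsfti (and their unsigned | 
 | ; counterparts), and the bounds are INT64_MIN/INT64_MAX for stest, 2^64 and | 
 | ; 0 for the unsigned flavors. On RV32 an i128 is wider than 2*XLEN, so the | 
 | ; calling convention returns it indirectly: the caller passes the address | 
 | ; of a stack slot in a0 (the addi a0, sp, 8 before each call) and loads the | 
 | ; four words back with lw. That is also why RV32IF first shuffles the | 
 | ; double argument up with mv a2, a1 / mv a1, a0. | 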
 |  | 
 | define i64 @stest_f64i64_mm(double %x) { | 
 | ; RV32IF-LABEL: stest_f64i64_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    mv a2, a1 | 
 | ; RV32IF-NEXT:    mv a1, a0 | 
 | ; RV32IF-NEXT:    addi a0, sp, 8 | 
 | ; RV32IF-NEXT:    call __fixdfti | 
 | ; RV32IF-NEXT:    lw a3, 8(sp) | 
 | ; RV32IF-NEXT:    lw a1, 12(sp) | 
 | ; RV32IF-NEXT:    lw a2, 16(sp) | 
 | ; RV32IF-NEXT:    lw a4, 20(sp) | 
 | ; RV32IF-NEXT:    lui a0, 524288 | 
 | ; RV32IF-NEXT:    addi a5, a0, -1 | 
 | ; RV32IF-NEXT:    beq a1, a5, .LBB45_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    sltu a6, a1, a5 | 
 | ; RV32IF-NEXT:    or a7, a2, a4 | 
 | ; RV32IF-NEXT:    bnez a7, .LBB45_3 | 
 | ; RV32IF-NEXT:    j .LBB45_4 | 
 | ; RV32IF-NEXT:  .LBB45_2: | 
 | ; RV32IF-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32IF-NEXT:    or a7, a2, a4 | 
 | ; RV32IF-NEXT:    beqz a7, .LBB45_4 | 
 | ; RV32IF-NEXT:  .LBB45_3: # %entry | 
 | ; RV32IF-NEXT:    slti a6, a4, 0 | 
 | ; RV32IF-NEXT:  .LBB45_4: # %entry | 
 | ; RV32IF-NEXT:    addi a7, a6, -1 | 
 | ; RV32IF-NEXT:    neg t0, a6 | 
 | ; RV32IF-NEXT:    bnez a6, .LBB45_6 | 
 | ; RV32IF-NEXT:  # %bb.5: # %entry | 
 | ; RV32IF-NEXT:    mv a1, a5 | 
 | ; RV32IF-NEXT:  .LBB45_6: # %entry | 
 | ; RV32IF-NEXT:    or a3, a7, a3 | 
 | ; RV32IF-NEXT:    and a4, t0, a4 | 
 | ; RV32IF-NEXT:    and a2, t0, a2 | 
 | ; RV32IF-NEXT:    beq a1, a0, .LBB45_8 | 
 | ; RV32IF-NEXT:  # %bb.7: # %entry | 
 | ; RV32IF-NEXT:    sltu a0, a0, a1 | 
 | ; RV32IF-NEXT:    j .LBB45_9 | 
 | ; RV32IF-NEXT:  .LBB45_8: | 
 | ; RV32IF-NEXT:    snez a0, a3 | 
 | ; RV32IF-NEXT:  .LBB45_9: # %entry | 
 | ; RV32IF-NEXT:    and a2, a2, a4 | 
 | ; RV32IF-NEXT:    li a5, -1 | 
 | ; RV32IF-NEXT:    beq a2, a5, .LBB45_11 | 
 | ; RV32IF-NEXT:  # %bb.10: # %entry | 
 | ; RV32IF-NEXT:    slti a0, a4, 0 | 
 | ; RV32IF-NEXT:    xori a0, a0, 1 | 
 | ; RV32IF-NEXT:  .LBB45_11: # %entry | 
 | ; RV32IF-NEXT:    bnez a0, .LBB45_13 | 
 | ; RV32IF-NEXT:  # %bb.12: # %entry | 
 | ; RV32IF-NEXT:    lui a1, 524288 | 
 | ; RV32IF-NEXT:  .LBB45_13: # %entry | 
 | ; RV32IF-NEXT:    neg a0, a0 | 
 | ; RV32IF-NEXT:    and a0, a0, a3 | 
 | ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64IF-LABEL: stest_f64i64_mm: | 
 | ; RV64IF:       # %bb.0: # %entry | 
 | ; RV64IF-NEXT:    addi sp, sp, -16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64IF-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64IF-NEXT:    call __fixdfti | 
 | ; RV64IF-NEXT:    li a2, -1 | 
 | ; RV64IF-NEXT:    srli a3, a2, 1 | 
 | ; RV64IF-NEXT:    beqz a1, .LBB45_2 | 
 | ; RV64IF-NEXT:  # %bb.1: # %entry | 
 | ; RV64IF-NEXT:    slti a4, a1, 0 | 
 | ; RV64IF-NEXT:    j .LBB45_3 | 
 | ; RV64IF-NEXT:  .LBB45_2: | 
 | ; RV64IF-NEXT:    sltu a4, a0, a3 | 
 | ; RV64IF-NEXT:  .LBB45_3: # %entry | 
 | ; RV64IF-NEXT:    neg a5, a4 | 
 | ; RV64IF-NEXT:    and a5, a5, a1 | 
 | ; RV64IF-NEXT:    bnez a4, .LBB45_5 | 
 | ; RV64IF-NEXT:  # %bb.4: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a3 | 
 | ; RV64IF-NEXT:  .LBB45_5: # %entry | 
 | ; RV64IF-NEXT:    slli a1, a2, 63 | 
 | ; RV64IF-NEXT:    beq a5, a2, .LBB45_7 | 
 | ; RV64IF-NEXT:  # %bb.6: # %entry | 
 | ; RV64IF-NEXT:    slti a2, a5, 0 | 
 | ; RV64IF-NEXT:    xori a2, a2, 1 | 
 | ; RV64IF-NEXT:    beqz a2, .LBB45_8 | 
 | ; RV64IF-NEXT:    j .LBB45_9 | 
 | ; RV64IF-NEXT:  .LBB45_7: | 
 | ; RV64IF-NEXT:    sltu a2, a1, a0 | 
 | ; RV64IF-NEXT:    bnez a2, .LBB45_9 | 
 | ; RV64IF-NEXT:  .LBB45_8: # %entry | 
 | ; RV64IF-NEXT:    mv a0, a1 | 
 | ; RV64IF-NEXT:  .LBB45_9: # %entry | 
 | ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64IF-NEXT:    .cfi_restore ra | 
 | ; RV64IF-NEXT:    addi sp, sp, 16 | 
 | ; RV64IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64IF-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: stest_f64i64_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    addi sp, sp, -32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IFD-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IFD-NEXT:    addi a0, sp, 8 | 
 | ; RV32IFD-NEXT:    call __fixdfti | 
 | ; RV32IFD-NEXT:    lw a3, 8(sp) | 
 | ; RV32IFD-NEXT:    lw a1, 12(sp) | 
 | ; RV32IFD-NEXT:    lw a2, 16(sp) | 
 | ; RV32IFD-NEXT:    lw a4, 20(sp) | 
 | ; RV32IFD-NEXT:    lui a0, 524288 | 
 | ; RV32IFD-NEXT:    addi a5, a0, -1 | 
 | ; RV32IFD-NEXT:    beq a1, a5, .LBB45_2 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    sltu a6, a1, a5 | 
 | ; RV32IFD-NEXT:    or a7, a2, a4 | 
 | ; RV32IFD-NEXT:    bnez a7, .LBB45_3 | 
 | ; RV32IFD-NEXT:    j .LBB45_4 | 
 | ; RV32IFD-NEXT:  .LBB45_2: | 
 | ; RV32IFD-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32IFD-NEXT:    or a7, a2, a4 | 
 | ; RV32IFD-NEXT:    beqz a7, .LBB45_4 | 
 | ; RV32IFD-NEXT:  .LBB45_3: # %entry | 
 | ; RV32IFD-NEXT:    slti a6, a4, 0 | 
 | ; RV32IFD-NEXT:  .LBB45_4: # %entry | 
 | ; RV32IFD-NEXT:    addi a7, a6, -1 | 
 | ; RV32IFD-NEXT:    neg t0, a6 | 
 | ; RV32IFD-NEXT:    bnez a6, .LBB45_6 | 
 | ; RV32IFD-NEXT:  # %bb.5: # %entry | 
 | ; RV32IFD-NEXT:    mv a1, a5 | 
 | ; RV32IFD-NEXT:  .LBB45_6: # %entry | 
 | ; RV32IFD-NEXT:    or a3, a7, a3 | 
 | ; RV32IFD-NEXT:    and a4, t0, a4 | 
 | ; RV32IFD-NEXT:    and a2, t0, a2 | 
 | ; RV32IFD-NEXT:    beq a1, a0, .LBB45_8 | 
 | ; RV32IFD-NEXT:  # %bb.7: # %entry | 
 | ; RV32IFD-NEXT:    sltu a0, a0, a1 | 
 | ; RV32IFD-NEXT:    j .LBB45_9 | 
 | ; RV32IFD-NEXT:  .LBB45_8: | 
 | ; RV32IFD-NEXT:    snez a0, a3 | 
 | ; RV32IFD-NEXT:  .LBB45_9: # %entry | 
 | ; RV32IFD-NEXT:    and a2, a2, a4 | 
 | ; RV32IFD-NEXT:    li a5, -1 | 
 | ; RV32IFD-NEXT:    beq a2, a5, .LBB45_11 | 
 | ; RV32IFD-NEXT:  # %bb.10: # %entry | 
 | ; RV32IFD-NEXT:    slti a0, a4, 0 | 
 | ; RV32IFD-NEXT:    xori a0, a0, 1 | 
 | ; RV32IFD-NEXT:  .LBB45_11: # %entry | 
 | ; RV32IFD-NEXT:    bnez a0, .LBB45_13 | 
 | ; RV32IFD-NEXT:  # %bb.12: # %entry | 
 | ; RV32IFD-NEXT:    lui a1, 524288 | 
 | ; RV32IFD-NEXT:  .LBB45_13: # %entry | 
 | ; RV32IFD-NEXT:    neg a0, a0 | 
 | ; RV32IFD-NEXT:    and a0, a0, a3 | 
 | ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IFD-NEXT:    .cfi_restore ra | 
 | ; RV32IFD-NEXT:    addi sp, sp, 32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IFD-NEXT:    ret | 
 | ; | 
 | ; RV64IFD-LABEL: stest_f64i64_mm: | 
 | ; RV64IFD:       # %bb.0: # %entry | 
 | ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz | 
 | ; RV64IFD-NEXT:    feq.d a1, fa0, fa0 | 
 | ; RV64IFD-NEXT:    seqz a1, a1 | 
 | ; RV64IFD-NEXT:    addi a1, a1, -1 | 
 | ; RV64IFD-NEXT:    and a0, a1, a0 | 
 | ; RV64IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
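 |  | 
 | ; Two materialization tricks above: RV64IF derives both i64 bounds from -1 | 
 | ; (srli by 1 gives 0x7fffffffffffffff = INT64_MAX, slli by 63 gives | 
 | ; INT64_MIN), while RV64IFD needs no clamp code at all, because fcvt.l.d | 
 | ; with rtz already saturates out-of-range inputs to INT64_MIN/INT64_MAX in | 
 | ; hardware; the feq.d/seqz/addi/and tail only forces the NaN case to 0 (a | 
 | ; bare fcvt would return INT64_MAX for NaN). | 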
 |  | 
 | define i64 @utest_f64i64_mm(double %x) { | 
 | ; RV32IF-LABEL: utest_f64i64_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    mv a2, a1 | 
 | ; RV32IF-NEXT:    mv a1, a0 | 
 | ; RV32IF-NEXT:    addi a0, sp, 8 | 
 | ; RV32IF-NEXT:    call __fixunsdfti | 
 | ; RV32IF-NEXT:    lw a0, 16(sp) | 
 | ; RV32IF-NEXT:    lw a1, 20(sp) | 
 | ; RV32IF-NEXT:    lw a2, 12(sp) | 
 | ; RV32IF-NEXT:    lw a3, 8(sp) | 
 | ; RV32IF-NEXT:    or a4, a1, a0 | 
 | ; RV32IF-NEXT:    xori a0, a0, 1 | 
 | ; RV32IF-NEXT:    seqz a4, a4 | 
 | ; RV32IF-NEXT:    or a0, a0, a1 | 
 | ; RV32IF-NEXT:    seqz a0, a0 | 
 | ; RV32IF-NEXT:    addi a0, a0, -1 | 
 | ; RV32IF-NEXT:    and a0, a0, a4 | 
 | ; RV32IF-NEXT:    neg a1, a0 | 
 | ; RV32IF-NEXT:    and a0, a1, a3 | 
 | ; RV32IF-NEXT:    and a1, a1, a2 | 
 | ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utest_f64i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixunsdfti | 
 | ; RV64-NEXT:    snez a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: utest_f64i64_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    addi sp, sp, -32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IFD-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IFD-NEXT:    addi a0, sp, 8 | 
 | ; RV32IFD-NEXT:    call __fixunsdfti | 
 | ; RV32IFD-NEXT:    lw a0, 16(sp) | 
 | ; RV32IFD-NEXT:    lw a1, 20(sp) | 
 | ; RV32IFD-NEXT:    lw a2, 12(sp) | 
 | ; RV32IFD-NEXT:    lw a3, 8(sp) | 
 | ; RV32IFD-NEXT:    or a4, a1, a0 | 
 | ; RV32IFD-NEXT:    xori a0, a0, 1 | 
 | ; RV32IFD-NEXT:    seqz a4, a4 | 
 | ; RV32IFD-NEXT:    or a0, a0, a1 | 
 | ; RV32IFD-NEXT:    seqz a0, a0 | 
 | ; RV32IFD-NEXT:    addi a0, a0, -1 | 
 | ; RV32IFD-NEXT:    and a0, a0, a4 | 
 | ; RV32IFD-NEXT:    neg a1, a0 | 
 | ; RV32IFD-NEXT:    and a0, a1, a3 | 
 | ; RV32IFD-NEXT:    and a1, a1, a2 | 
 | ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IFD-NEXT:    .cfi_restore ra | 
 | ; RV32IFD-NEXT:    addi sp, sp, 32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui double %x to i128 | 
 |   %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %conv6 = trunc i128 %spec.store.select to i64 | 
 |   ret i64 %conv6 | 
 | } | 
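 |  | 
 | ; The umin bound 18446744073709551616 above is 2^64, one past UINT64_MAX, | 
 | ; and it truncates to 0, so after the trunc the whole clamp reduces to | 
 | ; "keep the low 64 bits iff the high 64 bits are zero". RV64 builds that | 
 | ; keep mask from the high word in three instructions (snez/addi/and); RV32 | 
 | ; assembles the same mask from the two high words. A commented IR sketch of | 
 | ; the folded form, using %conv from the function above: | 
 | ;   %hi   = lshr i128 %conv, 64 | 
 | ;   %ok   = icmp eq i128 %hi, 0 | 
 | ;   %lo   = trunc i128 %conv to i64 | 
 | ;   %mask = sext i1 %ok to i64 | 
 | ;   %res  = and i64 %mask, %lo         ; %lo when %hi == 0, else 0 | 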
 |  | 
 | define i64 @ustest_f64i64_mm(double %x) { | 
 | ; RV32IF-LABEL: ustest_f64i64_mm: | 
 | ; RV32IF:       # %bb.0: # %entry | 
 | ; RV32IF-NEXT:    addi sp, sp, -32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IF-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IF-NEXT:    mv a2, a1 | 
 | ; RV32IF-NEXT:    mv a1, a0 | 
 | ; RV32IF-NEXT:    addi a0, sp, 8 | 
 | ; RV32IF-NEXT:    call __fixdfti | 
 | ; RV32IF-NEXT:    lw a0, 20(sp) | 
 | ; RV32IF-NEXT:    lw a1, 8(sp) | 
 | ; RV32IF-NEXT:    lw a2, 12(sp) | 
 | ; RV32IF-NEXT:    lw a3, 16(sp) | 
 | ; RV32IF-NEXT:    beqz a0, .LBB47_2 | 
 | ; RV32IF-NEXT:  # %bb.1: # %entry | 
 | ; RV32IF-NEXT:    slti a4, a0, 0 | 
 | ; RV32IF-NEXT:    j .LBB47_3 | 
 | ; RV32IF-NEXT:  .LBB47_2: | 
 | ; RV32IF-NEXT:    seqz a4, a3 | 
 | ; RV32IF-NEXT:  .LBB47_3: # %entry | 
 | ; RV32IF-NEXT:    xori a3, a3, 1 | 
 | ; RV32IF-NEXT:    or a3, a3, a0 | 
 | ; RV32IF-NEXT:    seqz a3, a3 | 
 | ; RV32IF-NEXT:    addi a3, a3, -1 | 
 | ; RV32IF-NEXT:    and a3, a3, a4 | 
 | ; RV32IF-NEXT:    neg a3, a3 | 
 | ; RV32IF-NEXT:    and a2, a3, a2 | 
 | ; RV32IF-NEXT:    and a1, a3, a1 | 
 | ; RV32IF-NEXT:    and a0, a3, a0 | 
 | ; RV32IF-NEXT:    slti a0, a0, 0 | 
 | ; RV32IF-NEXT:    addi a3, a0, -1 | 
 | ; RV32IF-NEXT:    and a0, a3, a1 | 
 | ; RV32IF-NEXT:    and a1, a3, a2 | 
 | ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IF-NEXT:    .cfi_restore ra | 
 | ; RV32IF-NEXT:    addi sp, sp, 32 | 
 | ; RV32IF-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IF-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f64i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixdfti | 
 | ; RV64-NEXT:    mv a2, a1 | 
 | ; RV64-NEXT:    blez a1, .LBB47_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a2, 1 | 
 | ; RV64-NEXT:  .LBB47_2: # %entry | 
 | ; RV64-NEXT:    slti a1, a1, 1 | 
 | ; RV64-NEXT:    slti a2, a2, 0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    addi a2, a2, -1 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | ; | 
 | ; RV32IFD-LABEL: ustest_f64i64_mm: | 
 | ; RV32IFD:       # %bb.0: # %entry | 
 | ; RV32IFD-NEXT:    addi sp, sp, -32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32IFD-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32IFD-NEXT:    addi a0, sp, 8 | 
 | ; RV32IFD-NEXT:    call __fixdfti | 
 | ; RV32IFD-NEXT:    lw a0, 20(sp) | 
 | ; RV32IFD-NEXT:    lw a1, 8(sp) | 
 | ; RV32IFD-NEXT:    lw a2, 12(sp) | 
 | ; RV32IFD-NEXT:    lw a3, 16(sp) | 
 | ; RV32IFD-NEXT:    beqz a0, .LBB47_2 | 
 | ; RV32IFD-NEXT:  # %bb.1: # %entry | 
 | ; RV32IFD-NEXT:    slti a4, a0, 0 | 
 | ; RV32IFD-NEXT:    j .LBB47_3 | 
 | ; RV32IFD-NEXT:  .LBB47_2: | 
 | ; RV32IFD-NEXT:    seqz a4, a3 | 
 | ; RV32IFD-NEXT:  .LBB47_3: # %entry | 
 | ; RV32IFD-NEXT:    xori a3, a3, 1 | 
 | ; RV32IFD-NEXT:    or a3, a3, a0 | 
 | ; RV32IFD-NEXT:    seqz a3, a3 | 
 | ; RV32IFD-NEXT:    addi a3, a3, -1 | 
 | ; RV32IFD-NEXT:    and a3, a3, a4 | 
 | ; RV32IFD-NEXT:    neg a3, a3 | 
 | ; RV32IFD-NEXT:    and a2, a3, a2 | 
 | ; RV32IFD-NEXT:    and a1, a3, a1 | 
 | ; RV32IFD-NEXT:    and a0, a3, a0 | 
 | ; RV32IFD-NEXT:    slti a0, a0, 0 | 
 | ; RV32IFD-NEXT:    addi a3, a0, -1 | 
 | ; RV32IFD-NEXT:    and a0, a3, a1 | 
 | ; RV32IFD-NEXT:    and a1, a3, a2 | 
 | ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32IFD-NEXT:    .cfi_restore ra | 
 | ; RV32IFD-NEXT:    addi sp, sp, 32 | 
 | ; RV32IFD-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32IFD-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi double %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
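 |  | 
 | ; For ustest the clamp is smin(v, 2^64) followed by smax(.., 0), so after | 
 | ; truncation the result is the low word exactly when the high 64 bits are | 
 | ; zero (negative inputs clamp to 0, and anything at or above 2^64 truncates | 
 | ; to 0). The RV64 sequence appears to check that with two masks, slti a1, | 
 | ; a1, 1 admitting high words <= 0 and the second slti/addi pair rejecting | 
 | ; the negative ones, which together leave only a zero high word. | 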
 |  | 
 | define i64 @stest_f32i64_mm(float %x) { | 
 | ; RV32-LABEL: stest_f32i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a1, 12(sp) | 
 | ; RV32-NEXT:    lw a2, 16(sp) | 
 | ; RV32-NEXT:    lw a4, 20(sp) | 
 | ; RV32-NEXT:    lui a0, 524288 | 
 | ; RV32-NEXT:    addi a5, a0, -1 | 
 | ; RV32-NEXT:    beq a1, a5, .LBB48_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    sltu a6, a1, a5 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    bnez a7, .LBB48_3 | 
 | ; RV32-NEXT:    j .LBB48_4 | 
 | ; RV32-NEXT:  .LBB48_2: | 
 | ; RV32-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    beqz a7, .LBB48_4 | 
 | ; RV32-NEXT:  .LBB48_3: # %entry | 
 | ; RV32-NEXT:    slti a6, a4, 0 | 
 | ; RV32-NEXT:  .LBB48_4: # %entry | 
 | ; RV32-NEXT:    addi a7, a6, -1 | 
 | ; RV32-NEXT:    neg t0, a6 | 
 | ; RV32-NEXT:    bnez a6, .LBB48_6 | 
 | ; RV32-NEXT:  # %bb.5: # %entry | 
 | ; RV32-NEXT:    mv a1, a5 | 
 | ; RV32-NEXT:  .LBB48_6: # %entry | 
 | ; RV32-NEXT:    or a3, a7, a3 | 
 | ; RV32-NEXT:    and a4, t0, a4 | 
 | ; RV32-NEXT:    and a2, t0, a2 | 
 | ; RV32-NEXT:    beq a1, a0, .LBB48_8 | 
 | ; RV32-NEXT:  # %bb.7: # %entry | 
 | ; RV32-NEXT:    sltu a0, a0, a1 | 
 | ; RV32-NEXT:    j .LBB48_9 | 
 | ; RV32-NEXT:  .LBB48_8: | 
 | ; RV32-NEXT:    snez a0, a3 | 
 | ; RV32-NEXT:  .LBB48_9: # %entry | 
 | ; RV32-NEXT:    and a2, a2, a4 | 
 | ; RV32-NEXT:    li a5, -1 | 
 | ; RV32-NEXT:    beq a2, a5, .LBB48_11 | 
 | ; RV32-NEXT:  # %bb.10: # %entry | 
 | ; RV32-NEXT:    slti a0, a4, 0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:  .LBB48_11: # %entry | 
 | ; RV32-NEXT:    bnez a0, .LBB48_13 | 
 | ; RV32-NEXT:  # %bb.12: # %entry | 
 | ; RV32-NEXT:    lui a1, 524288 | 
 | ; RV32-NEXT:  .LBB48_13: # %entry | 
 | ; RV32-NEXT:    neg a0, a0 | 
 | ; RV32-NEXT:    and a0, a0, a3 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f32i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz | 
 | ; RV64-NEXT:    feq.s a1, fa0, fa0 | 
 | ; RV64-NEXT:    seqz a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
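 |  | 
 | ; The f32 variants repeat the f64 i128 patterns with __fixsfti and | 
 | ; __fixunssfti; since every RUN line enables +f, RV64 again folds stest | 
 | ; into a saturating fcvt.l.s plus the feq.s NaN mask, which is why a single | 
 | ; shared RV64 prefix covers both FileCheck configurations here. | 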
 |  | 
 | define i64 @utest_f32i64_mm(float %x) { | 
 | ; RV32-LABEL: utest_f32i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixunssfti | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    or a4, a1, a0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:    seqz a4, a4 | 
 | ; RV32-NEXT:    or a0, a0, a1 | 
 | ; RV32-NEXT:    seqz a0, a0 | 
 | ; RV32-NEXT:    addi a0, a0, -1 | 
 | ; RV32-NEXT:    and a0, a0, a4 | 
 | ; RV32-NEXT:    neg a1, a0 | 
 | ; RV32-NEXT:    and a0, a1, a3 | 
 | ; RV32-NEXT:    and a1, a1, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utest_f32i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixunssfti | 
 | ; RV64-NEXT:    snez a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui float %x to i128 | 
 |   %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %conv6 = trunc i128 %spec.store.select to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @ustest_f32i64_mm(float %x) { | 
 | ; RV32-LABEL: ustest_f32i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a0, 20(sp) | 
 | ; RV32-NEXT:    lw a1, 8(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 16(sp) | 
 | ; RV32-NEXT:    beqz a0, .LBB50_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    slti a4, a0, 0 | 
 | ; RV32-NEXT:    j .LBB50_3 | 
 | ; RV32-NEXT:  .LBB50_2: | 
 | ; RV32-NEXT:    seqz a4, a3 | 
 | ; RV32-NEXT:  .LBB50_3: # %entry | 
 | ; RV32-NEXT:    xori a3, a3, 1 | 
 | ; RV32-NEXT:    or a3, a3, a0 | 
 | ; RV32-NEXT:    seqz a3, a3 | 
 | ; RV32-NEXT:    addi a3, a3, -1 | 
 | ; RV32-NEXT:    and a3, a3, a4 | 
 | ; RV32-NEXT:    neg a3, a3 | 
 | ; RV32-NEXT:    and a2, a3, a2 | 
 | ; RV32-NEXT:    and a1, a3, a1 | 
 | ; RV32-NEXT:    and a0, a3, a0 | 
 | ; RV32-NEXT:    slti a0, a0, 0 | 
 | ; RV32-NEXT:    addi a3, a0, -1 | 
 | ; RV32-NEXT:    and a0, a3, a1 | 
 | ; RV32-NEXT:    and a1, a3, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f32i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    mv a2, a1 | 
 | ; RV64-NEXT:    blez a1, .LBB50_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a2, 1 | 
 | ; RV64-NEXT:  .LBB50_2: # %entry | 
 | ; RV64-NEXT:    slti a1, a1, 1 | 
 | ; RV64-NEXT:    slti a2, a2, 0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    addi a2, a2, -1 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi float %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
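 | ; None of the RUN configurations enable Zfh, so every f16 test first widens | 
 | ; the half to float with the __extendhfsf2 libcall and then follows the | 
 | ; corresponding f32 path above. | 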
 | define i64 @stest_f16i64_mm(half %x) { | 
 | ; RV32-LABEL: stest_f16i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    lw a1, 12(sp) | 
 | ; RV32-NEXT:    lw a2, 16(sp) | 
 | ; RV32-NEXT:    lw a4, 20(sp) | 
 | ; RV32-NEXT:    lui a0, 524288 | 
 | ; RV32-NEXT:    addi a5, a0, -1 | 
 | ; RV32-NEXT:    beq a1, a5, .LBB51_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    sltu a6, a1, a5 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    bnez a7, .LBB51_3 | 
 | ; RV32-NEXT:    j .LBB51_4 | 
 | ; RV32-NEXT:  .LBB51_2: | 
 | ; RV32-NEXT:    sltiu a6, a3, -1 | 
 | ; RV32-NEXT:    or a7, a2, a4 | 
 | ; RV32-NEXT:    beqz a7, .LBB51_4 | 
 | ; RV32-NEXT:  .LBB51_3: # %entry | 
 | ; RV32-NEXT:    slti a6, a4, 0 | 
 | ; RV32-NEXT:  .LBB51_4: # %entry | 
 | ; RV32-NEXT:    addi a7, a6, -1 | 
 | ; RV32-NEXT:    neg t0, a6 | 
 | ; RV32-NEXT:    bnez a6, .LBB51_6 | 
 | ; RV32-NEXT:  # %bb.5: # %entry | 
 | ; RV32-NEXT:    mv a1, a5 | 
 | ; RV32-NEXT:  .LBB51_6: # %entry | 
 | ; RV32-NEXT:    or a3, a7, a3 | 
 | ; RV32-NEXT:    and a4, t0, a4 | 
 | ; RV32-NEXT:    and a2, t0, a2 | 
 | ; RV32-NEXT:    beq a1, a0, .LBB51_8 | 
 | ; RV32-NEXT:  # %bb.7: # %entry | 
 | ; RV32-NEXT:    sltu a0, a0, a1 | 
 | ; RV32-NEXT:    j .LBB51_9 | 
 | ; RV32-NEXT:  .LBB51_8: | 
 | ; RV32-NEXT:    snez a0, a3 | 
 | ; RV32-NEXT:  .LBB51_9: # %entry | 
 | ; RV32-NEXT:    and a2, a2, a4 | 
 | ; RV32-NEXT:    li a5, -1 | 
 | ; RV32-NEXT:    beq a2, a5, .LBB51_11 | 
 | ; RV32-NEXT:  # %bb.10: # %entry | 
 | ; RV32-NEXT:    slti a0, a4, 0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:  .LBB51_11: # %entry | 
 | ; RV32-NEXT:    bnez a0, .LBB51_13 | 
 | ; RV32-NEXT:  # %bb.12: # %entry | 
 | ; RV32-NEXT:    lui a1, 524288 | 
 | ; RV32-NEXT:  .LBB51_13: # %entry | 
 | ; RV32-NEXT:    neg a0, a0 | 
 | ; RV32-NEXT:    and a0, a0, a3 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: stest_f16i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    li a2, -1 | 
 | ; RV64-NEXT:    srli a3, a2, 1 | 
 | ; RV64-NEXT:    beqz a1, .LBB51_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    slti a4, a1, 0 | 
 | ; RV64-NEXT:    j .LBB51_3 | 
 | ; RV64-NEXT:  .LBB51_2: | 
 | ; RV64-NEXT:    sltu a4, a0, a3 | 
 | ; RV64-NEXT:  .LBB51_3: # %entry | 
 | ; RV64-NEXT:    neg a5, a4 | 
 | ; RV64-NEXT:    and a5, a5, a1 | 
 | ; RV64-NEXT:    bnez a4, .LBB51_5 | 
 | ; RV64-NEXT:  # %bb.4: # %entry | 
 | ; RV64-NEXT:    mv a0, a3 | 
 | ; RV64-NEXT:  .LBB51_5: # %entry | 
 | ; RV64-NEXT:    slli a1, a2, 63 | 
 | ; RV64-NEXT:    beq a5, a2, .LBB51_7 | 
 | ; RV64-NEXT:  # %bb.6: # %entry | 
 | ; RV64-NEXT:    slti a2, a5, 0 | 
 | ; RV64-NEXT:    xori a2, a2, 1 | 
 | ; RV64-NEXT:    beqz a2, .LBB51_8 | 
 | ; RV64-NEXT:    j .LBB51_9 | 
 | ; RV64-NEXT:  .LBB51_7: | 
 | ; RV64-NEXT:    sltu a2, a1, a0 | 
 | ; RV64-NEXT:    bnez a2, .LBB51_9 | 
 | ; RV64-NEXT:  .LBB51_8: # %entry | 
 | ; RV64-NEXT:    mv a0, a1 | 
 | ; RV64-NEXT:  .LBB51_9: # %entry | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @utesth_f16i64_mm(half %x) { | 
 | ; RV32-LABEL: utesth_f16i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixunssfti | 
 | ; RV32-NEXT:    lw a0, 16(sp) | 
 | ; RV32-NEXT:    lw a1, 20(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 8(sp) | 
 | ; RV32-NEXT:    or a4, a1, a0 | 
 | ; RV32-NEXT:    xori a0, a0, 1 | 
 | ; RV32-NEXT:    seqz a4, a4 | 
 | ; RV32-NEXT:    or a0, a0, a1 | 
 | ; RV32-NEXT:    seqz a0, a0 | 
 | ; RV32-NEXT:    addi a0, a0, -1 | 
 | ; RV32-NEXT:    and a0, a0, a4 | 
 | ; RV32-NEXT:    neg a1, a0 | 
 | ; RV32-NEXT:    and a0, a1, a3 | 
 | ; RV32-NEXT:    and a1, a1, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: utesth_f16i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixunssfti | 
 | ; RV64-NEXT:    snez a1, a1 | 
 | ; RV64-NEXT:    addi a1, a1, -1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptoui half %x to i128 | 
 |   %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %conv6 = trunc i128 %spec.store.select to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | define i64 @ustest_f16i64_mm(half %x) { | 
 | ; RV32-LABEL: ustest_f16i64_mm: | 
 | ; RV32:       # %bb.0: # %entry | 
 | ; RV32-NEXT:    addi sp, sp, -32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 32 | 
 | ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill | 
 | ; RV32-NEXT:    .cfi_offset ra, -4 | 
 | ; RV32-NEXT:    call __extendhfsf2 | 
 | ; RV32-NEXT:    addi a0, sp, 8 | 
 | ; RV32-NEXT:    call __fixsfti | 
 | ; RV32-NEXT:    lw a0, 20(sp) | 
 | ; RV32-NEXT:    lw a1, 8(sp) | 
 | ; RV32-NEXT:    lw a2, 12(sp) | 
 | ; RV32-NEXT:    lw a3, 16(sp) | 
 | ; RV32-NEXT:    beqz a0, .LBB53_2 | 
 | ; RV32-NEXT:  # %bb.1: # %entry | 
 | ; RV32-NEXT:    slti a4, a0, 0 | 
 | ; RV32-NEXT:    j .LBB53_3 | 
 | ; RV32-NEXT:  .LBB53_2: | 
 | ; RV32-NEXT:    seqz a4, a3 | 
 | ; RV32-NEXT:  .LBB53_3: # %entry | 
 | ; RV32-NEXT:    xori a3, a3, 1 | 
 | ; RV32-NEXT:    or a3, a3, a0 | 
 | ; RV32-NEXT:    seqz a3, a3 | 
 | ; RV32-NEXT:    addi a3, a3, -1 | 
 | ; RV32-NEXT:    and a3, a3, a4 | 
 | ; RV32-NEXT:    neg a3, a3 | 
 | ; RV32-NEXT:    and a2, a3, a2 | 
 | ; RV32-NEXT:    and a1, a3, a1 | 
 | ; RV32-NEXT:    and a0, a3, a0 | 
 | ; RV32-NEXT:    slti a0, a0, 0 | 
 | ; RV32-NEXT:    addi a3, a0, -1 | 
 | ; RV32-NEXT:    and a0, a3, a1 | 
 | ; RV32-NEXT:    and a1, a3, a2 | 
 | ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload | 
 | ; RV32-NEXT:    .cfi_restore ra | 
 | ; RV32-NEXT:    addi sp, sp, 32 | 
 | ; RV32-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV32-NEXT:    ret | 
 | ; | 
 | ; RV64-LABEL: ustest_f16i64_mm: | 
 | ; RV64:       # %bb.0: # %entry | 
 | ; RV64-NEXT:    addi sp, sp, -16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 16 | 
 | ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill | 
 | ; RV64-NEXT:    .cfi_offset ra, -8 | 
 | ; RV64-NEXT:    call __extendhfsf2 | 
 | ; RV64-NEXT:    call __fixsfti | 
 | ; RV64-NEXT:    mv a2, a1 | 
 | ; RV64-NEXT:    blez a1, .LBB53_2 | 
 | ; RV64-NEXT:  # %bb.1: # %entry | 
 | ; RV64-NEXT:    li a2, 1 | 
 | ; RV64-NEXT:  .LBB53_2: # %entry | 
 | ; RV64-NEXT:    slti a1, a1, 1 | 
 | ; RV64-NEXT:    slti a2, a2, 0 | 
 | ; RV64-NEXT:    neg a1, a1 | 
 | ; RV64-NEXT:    and a0, a1, a0 | 
 | ; RV64-NEXT:    addi a2, a2, -1 | 
 | ; RV64-NEXT:    and a0, a2, a0 | 
 | ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload | 
 | ; RV64-NEXT:    .cfi_restore ra | 
 | ; RV64-NEXT:    addi sp, sp, 16 | 
 | ; RV64-NEXT:    .cfi_def_cfa_offset 0 | 
 | ; RV64-NEXT:    ret | 
 | entry: | 
 |   %conv = fptosi half %x to i128 | 
 |   %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) | 
 |   %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) | 
 |   %conv6 = trunc i128 %spec.store.select7 to i64 | 
 |   ret i64 %conv6 | 
 | } | 
 |  | 
 | declare i32 @llvm.smin.i32(i32, i32) | 
 | declare i32 @llvm.smax.i32(i32, i32) | 
 | declare i32 @llvm.umin.i32(i32, i32) | 
 | declare i64 @llvm.smin.i64(i64, i64) | 
 | declare i64 @llvm.smax.i64(i64, i64) | 
 | declare i64 @llvm.umin.i64(i64, i64) | 
 | declare i128 @llvm.smin.i128(i128, i128) | 
 | declare i128 @llvm.smax.i128(i128, i128) | 
 | declare i128 @llvm.umin.i128(i128, i128) |