; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
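
; These tests check that a float rounding intrinsic followed by an
; fp-to-int conversion is folded into a single fcvt instruction using the
; matching static rounding mode: floor uses rdn (round down), ceil uses
; rup (round up), trunc uses rtz (round toward zero), round uses rmm
; (round to nearest, ties to max magnitude), and roundeven uses rne
; (round to nearest, ties to even).
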
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}

define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}

define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
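
; RV32 has no instruction that converts a float to a 64-bit integer, so
; for i64 results the RV32IF configuration falls back to libcalls: the
; rounding libm routine (floorf here) followed by __fixsfdi, or
; __fixunssfdi for the unsigned case.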
define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call floorf@plt
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}

define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}

define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}

define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}

define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call floorf@plt
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
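
; ceil: llvm.ceil.f32 plus a conversion folds to fcvt with the rup mode.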
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}

define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}

define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}

define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call ceilf@plt
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}

define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}

define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}

define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}

define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call ceilf@plt
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
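
; trunc: llvm.trunc.f32 plus a conversion folds to fcvt with the rtz mode.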
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}

define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}

define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}

define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call truncf@plt
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}

define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}

define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}

define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}

define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call truncf@plt
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
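
; round: llvm.round.f32 plus a conversion folds to fcvt with the rmm mode.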
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}

define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}

define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}

define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundf@plt
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}

define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}

define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}

define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}

define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundf@plt
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
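
; roundeven: llvm.roundeven.f32 plus a conversion folds to fcvt with the rne mode.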
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}

define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}

define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}

define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundevenf@plt
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}

define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}

define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}

define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}

define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundevenf@plt
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
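
; When the rounded value is returned as a float rather than converted,
; there is no single instruction for it with only +f, so these cases
; lower to the corresponding libm calls (floorf, ceilf, truncf, roundf,
; roundevenf).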
define float @test_floor_float(float %x) {
; RV32IF-LABEL: test_floor_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call floorf@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call floorf@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
ret float %a
}

define float @test_ceil_float(float %x) {
; RV32IF-LABEL: test_ceil_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call ceilf@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call ceilf@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}

define float @test_trunc_float(float %x) {
; RV32IF-LABEL: test_trunc_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call truncf@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call truncf@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}

define float @test_round_float(float %x) {
; RV32IF-LABEL: test_round_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundf@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call roundf@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
ret float %a
}

define float @test_roundeven_float(float %x) {
; RV32IF-LABEL: test_roundeven_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call roundevenf@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call roundevenf@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}

declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)