; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R64
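
; With no fast-math flags, @llvm.maxnum.f32 must keep IEEE-754 NaN
; semantics, and these triples enable no F/D extension, so the intrinsic
; lowers to a soft-float libcall to fmaxf on both RV32 and RV64.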
define float @maxnum_f32(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmaxf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f32:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmaxf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
%r = call float @llvm.maxnum.f32(float %x, float %y)
ret float %r
}
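
; The fast flag licenses assuming neither input is NaN, so the libcall
; can be replaced by an inline compare-and-select; on these soft-float
; targets the float comparison is itself a libcall (__gtsf2, which
; returns a value greater than zero when x > y).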
define float @maxnum_f32_fast(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32_fast:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a1
; R32-NEXT: mv s0, a0
; R32-NEXT: call __gtsf2@plt
; R32-NEXT: bgtz a0, .LBB1_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s0, s1
; R32-NEXT: .LBB1_2:
; R32-NEXT: mv a0, s0
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; R32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f32_fast:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -32
; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
; R64-NEXT: call __gtsf2@plt
; R64-NEXT: bgtz a0, .LBB1_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
; R64-NEXT: .LBB1_2:
; R64-NEXT: mv a0, s0
; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 32
; R64-NEXT: ret
%r = call fast float @llvm.maxnum.f32(float %x, float %y)
ret float %r
}
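
; Same as maxnum_f32, but for f64 the libcall is fmax.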
define double @maxnum_f64(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmax@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f64:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmax@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
%r = call double @llvm.maxnum.f64(double %x, double %y)
ret double %r
}
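
; nnan alone is enough to enable the compare-and-select expansion (via
; __gtdf2). On RV32 a double occupies a GPR pair (a0/a1), so the select
; is split into two 32-bit selects and the comparison libcall is emitted
; once per half.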
define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64_nnan:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -32
; R32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; R32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; R32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; R32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; R32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; R32-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a3
; R32-NEXT: mv s2, a2
; R32-NEXT: mv s0, a1
; R32-NEXT: mv s4, a0
; R32-NEXT: call __gtdf2@plt
; R32-NEXT: mv s3, s4
; R32-NEXT: bgtz a0, .LBB3_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s3, s2
; R32-NEXT: .LBB3_2:
; R32-NEXT: mv a0, s4
; R32-NEXT: mv a1, s0
; R32-NEXT: mv a2, s2
; R32-NEXT: mv a3, s1
; R32-NEXT: call __gtdf2@plt
; R32-NEXT: bgtz a0, .LBB3_4
; R32-NEXT: # %bb.3:
; R32-NEXT: mv s0, s1
; R32-NEXT: .LBB3_4:
; R32-NEXT: mv a0, s3
; R32-NEXT: mv a1, s0
; R32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; R32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; R32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; R32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; R32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; R32-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 32
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f64_nnan:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -32
; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
; R64-NEXT: call __gtdf2@plt
; R64-NEXT: bgtz a0, .LBB3_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
; R64-NEXT: .LBB3_2:
; R64-NEXT: mv a0, s0
; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 32
; R64-NEXT: ret
%r = call nnan double @llvm.maxnum.f64(double %x, double %y)
ret double %r
}
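
; Plain minnum keeps NaN semantics and lowers to the fminf libcall.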
define float @minnum_f32(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fminf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f32:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fminf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
%r = call float @llvm.minnum.f32(float %x, float %y)
ret float %r
}
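
; nnan minnum expands to a compare-and-select using __ltsf2 (which
; returns a value less than zero when x < y), keeping the first operand
; on a taken branch.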
define float @minnum_f32_nnan(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32_nnan:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a1
; R32-NEXT: mv s0, a0
; R32-NEXT: call __ltsf2@plt
; R32-NEXT: bltz a0, .LBB5_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s0, s1
; R32-NEXT: .LBB5_2:
; R32-NEXT: mv a0, s0
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; R32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f32_nnan:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -32
; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
; R64-NEXT: call __ltsf2@plt
; R64-NEXT: bltz a0, .LBB5_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
; R64-NEXT: .LBB5_2:
; R64-NEXT: mv a0, s0
; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 32
; R64-NEXT: ret
%r = call nnan float @llvm.minnum.f32(float %x, float %y)
ret float %r
}
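
; Same as minnum_f32, but for f64 the libcall is fmin.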
define double @minnum_f64(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmin@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f64:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmin@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
%r = call double @llvm.minnum.f64(double %x, double %y)
ret double %r
}
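
; fast minnum on f64 mirrors maxnum_f64_nnan: __ltdf2 comparisons, with
; the RV32 select again split across the double's register pair.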
define double @minnum_f64_fast(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64_fast:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -32
; R32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; R32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; R32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; R32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; R32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; R32-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a3
; R32-NEXT: mv s2, a2
; R32-NEXT: mv s0, a1
; R32-NEXT: mv s4, a0
; R32-NEXT: call __ltdf2@plt
; R32-NEXT: mv s3, s4
; R32-NEXT: bltz a0, .LBB7_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s3, s2
; R32-NEXT: .LBB7_2:
; R32-NEXT: mv a0, s4
; R32-NEXT: mv a1, s0
; R32-NEXT: mv a2, s2
; R32-NEXT: mv a3, s1
; R32-NEXT: call __ltdf2@plt
; R32-NEXT: bltz a0, .LBB7_4
; R32-NEXT: # %bb.3:
; R32-NEXT: mv s0, s1
; R32-NEXT: .LBB7_4:
; R32-NEXT: mv a0, s3
; R32-NEXT: mv a1, s0
; R32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; R32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; R32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; R32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; R32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; R32-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 32
; R32-NEXT: ret
;
; R64-LABEL: minnum_f64_fast:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -32
; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
; R64-NEXT: call __ltdf2@plt
; R64-NEXT: bltz a0, .LBB7_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
; R64-NEXT: .LBB7_2:
; R64-NEXT: mv a0, s0
; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 32
; R64-NEXT: ret
%r = call fast double @llvm.minnum.f64(double %x, double %y)
ret double %r
}

declare float @llvm.maxnum.f32(float, float)
declare double @llvm.maxnum.f64(double, double)
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)