; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV64I %s
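
; (x urem 25) urem 5 folds to a single (x urem 5) because 5 divides 25, so only
; one remainder libcall is emitted.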
define i32 @fold_urem_constants(i32 %v0) nounwind {
; RV32I-LABEL: fold_urem_constants:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 5
; RV32I-NEXT: tail __umodsi3
;
; RV64I-LABEL: fold_urem_constants:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 5
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%v1 = urem i32 %v0, 25
%v2 = urem i32 %v1, 5
ret i32 %v2
}
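
; (x urem 25) urem 3 cannot be folded into a single remainder because 3 does not
; divide 25, so both libcalls remain.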
define i32 @dont_fold_urem_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_urem_constants:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 25
; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: li a1, 3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: tail __umodsi3
;
; RV64I-LABEL: dont_fold_urem_constants:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 25
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%v1 = urem i32 %v0, 25
%v2 = urem i32 %v1, 3
ret i32 %v2
}
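
; Mixed urem/srem: (x urem 25) srem 3. The two operations are not combined; the
; srem is still lowered as an unsigned remainder since (x urem 25) is non-negative.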
define i32 @dont_fold_urem_srem_mixed_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_urem_srem_mixed_constants:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 25
; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: li a1, 3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: tail __umodsi3
;
; RV64I-LABEL: dont_fold_urem_srem_mixed_constants:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 25
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%v1 = urem i32 %v0, 25
%v2 = srem i32 %v1, 3
ret i32 %v2
}
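
; Mixed srem/urem: (x srem 25) urem 3. Both remainders are kept, lowered as a
; signed libcall (__modsi3/__moddi3) followed by an unsigned one (__umodsi3/__umoddi3).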
define i32 @dont_fold_srem_urem_mixed_constants(i32 %v0) nounwind {
; RV32I-LABEL: dont_fold_srem_urem_mixed_constants:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 25
; RV32I-NEXT: call __modsi3
; RV32I-NEXT: li a1, 3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: tail __umodsi3
;
; RV64I-LABEL: dont_fold_srem_urem_mixed_constants:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, 25
; RV64I-NEXT: call __moddi3
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%v1 = srem i32 %v0, 25
%v2 = urem i32 %v1, 3
ret i32 %v2
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}