; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
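;
; Check that multiplication by a constant is expanded into shift/add/sub
; sequences when the M extension is not enabled. 64-bit multiplies on RV32
; either expand the same way or fall back to a __muldi3 libcall.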
define i32 @muli32_0x555(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x555:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 2
; RV32I-NEXT: slli a2, a0, 4
; RV32I-NEXT: slli a3, a0, 6
; RV32I-NEXT: add a2, a2, a3
; RV32I-NEXT: slli a3, a0, 8
; RV32I-NEXT: add a1, a0, a1
; RV32I-NEXT: slli a0, a0, 10
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: add a0, a3, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x555:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a2, a0, 4
; RV64I-NEXT: slli a3, a0, 6
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: slli a3, a0, 8
; RV64I-NEXT: add a1, a0, a1
; RV64I-NEXT: slli a0, a0, 10
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: add a0, a3, a0
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 1365
ret i32 %a1
}
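; RV32 lowers this 64-bit multiply to a __muldi3 libcall, while RV64 expands
; it into shifts and adds.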
define i64 @muli64_0x555(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x555:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 1365
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x555:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a2, a0, 4
; RV64I-NEXT: slli a3, a0, 6
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: slli a3, a0, 8
; RV64I-NEXT: add a1, a0, a1
; RV64I-NEXT: slli a0, a0, 10
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: add a0, a3, a0
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 1365
ret i64 %a1
}
define i32 @muli32_0x33333333(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x33333333:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 4
; RV32I-NEXT: slli a2, a0, 6
; RV32I-NEXT: slli a3, a0, 8
; RV32I-NEXT: slli a4, a0, 10
; RV32I-NEXT: slli a5, a0, 14
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: slli a2, a0, 16
; RV32I-NEXT: sub a3, a3, a4
; RV32I-NEXT: slli a4, a0, 22
; RV32I-NEXT: sub a5, a5, a2
; RV32I-NEXT: slli a2, a0, 24
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: slli a2, a0, 2
; RV32I-NEXT: sub a2, a2, a0
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: slli a1, a0, 12
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: slli a3, a0, 18
; RV32I-NEXT: add a3, a5, a3
; RV32I-NEXT: slli a5, a0, 26
; RV32I-NEXT: add a4, a4, a5
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: slli a1, a0, 20
; RV32I-NEXT: sub a3, a3, a1
; RV32I-NEXT: slli a1, a0, 28
; RV32I-NEXT: sub a4, a4, a1
; RV32I-NEXT: slli a0, a0, 30
; RV32I-NEXT: add a2, a2, a3
; RV32I-NEXT: add a0, a4, a0
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x33333333:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 4
; RV64I-NEXT: slli a2, a0, 6
; RV64I-NEXT: slli a3, a0, 8
; RV64I-NEXT: slli a4, a0, 10
; RV64I-NEXT: slli a5, a0, 14
; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: slli a2, a0, 16
; RV64I-NEXT: sub a3, a3, a4
; RV64I-NEXT: slli a4, a0, 22
; RV64I-NEXT: sub a5, a5, a2
; RV64I-NEXT: slli a2, a0, 24
; RV64I-NEXT: sub a4, a4, a2
; RV64I-NEXT: slli a2, a0, 2
; RV64I-NEXT: sub a2, a2, a0
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: add a1, a3, a1
; RV64I-NEXT: slli a3, a0, 18
; RV64I-NEXT: add a3, a5, a3
; RV64I-NEXT: slli a5, a0, 26
; RV64I-NEXT: add a4, a4, a5
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 20
; RV64I-NEXT: sub a3, a3, a1
; RV64I-NEXT: slli a1, a0, 28
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: slli a0, a0, 30
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: add a0, a4, a0
; RV64I-NEXT: add a0, a2, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 858993459
ret i32 %a1
}
define i64 @muli64_0x33333333(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x33333333:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 209715
; RV32I-NEXT: addi a2, a2, 819
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x33333333:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 4
; RV64I-NEXT: slli a2, a0, 6
; RV64I-NEXT: slli a3, a0, 8
; RV64I-NEXT: slli a4, a0, 10
; RV64I-NEXT: slli a5, a0, 14
; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: slli a2, a0, 16
; RV64I-NEXT: sub a3, a3, a4
; RV64I-NEXT: slli a4, a0, 22
; RV64I-NEXT: sub a5, a5, a2
; RV64I-NEXT: slli a2, a0, 24
; RV64I-NEXT: sub a4, a4, a2
; RV64I-NEXT: slli a2, a0, 2
; RV64I-NEXT: sub a2, a2, a0
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: add a1, a3, a1
; RV64I-NEXT: slli a3, a0, 18
; RV64I-NEXT: add a3, a5, a3
; RV64I-NEXT: slli a5, a0, 26
; RV64I-NEXT: add a4, a4, a5
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 20
; RV64I-NEXT: sub a3, a3, a1
; RV64I-NEXT: slli a1, a0, 28
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: slli a0, a0, 30
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: add a0, a4, a0
; RV64I-NEXT: add a0, a2, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 858993459
ret i64 %a1
}
define i32 @muli32_0xaaaaaaaa(i32 %a) nounwind {
; RV32I-LABEL: muli32_0xaaaaaaaa:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 3
; RV32I-NEXT: slli a2, a0, 1
; RV32I-NEXT: slli a3, a0, 5
; RV32I-NEXT: slli a4, a0, 7
; RV32I-NEXT: slli a5, a0, 9
; RV32I-NEXT: slli a6, a0, 11
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: slli a2, a0, 15
; RV32I-NEXT: add a3, a3, a4
; RV32I-NEXT: slli a4, a0, 17
; RV32I-NEXT: add a5, a5, a6
; RV32I-NEXT: slli a6, a0, 23
; RV32I-NEXT: add a2, a2, a4
; RV32I-NEXT: slli a4, a0, 25
; RV32I-NEXT: add a4, a6, a4
; RV32I-NEXT: add a1, a1, a3
; RV32I-NEXT: slli a3, a0, 13
; RV32I-NEXT: add a3, a5, a3
; RV32I-NEXT: slli a5, a0, 19
; RV32I-NEXT: add a2, a2, a5
; RV32I-NEXT: slli a5, a0, 27
; RV32I-NEXT: add a4, a4, a5
; RV32I-NEXT: add a1, a1, a3
; RV32I-NEXT: slli a3, a0, 21
; RV32I-NEXT: add a2, a2, a3
; RV32I-NEXT: slli a3, a0, 29
; RV32I-NEXT: add a3, a4, a3
; RV32I-NEXT: slli a0, a0, 31
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: add a0, a3, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0xaaaaaaaa:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a3, a0, 5
; RV64I-NEXT: slli a4, a0, 7
; RV64I-NEXT: slli a5, a0, 9
; RV64I-NEXT: slli a6, a0, 11
; RV64I-NEXT: add a1, a2, a1
; RV64I-NEXT: slli a2, a0, 15
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 17
; RV64I-NEXT: add a5, a5, a6
; RV64I-NEXT: slli a6, a0, 23
; RV64I-NEXT: add a2, a2, a4
; RV64I-NEXT: slli a4, a0, 25
; RV64I-NEXT: add a4, a6, a4
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: slli a3, a0, 13
; RV64I-NEXT: add a3, a5, a3
; RV64I-NEXT: slli a5, a0, 19
; RV64I-NEXT: add a2, a2, a5
; RV64I-NEXT: slli a5, a0, 27
; RV64I-NEXT: add a4, a4, a5
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: slli a3, a0, 21
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: slli a3, a0, 29
; RV64I-NEXT: add a3, a4, a3
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: sub a0, a3, a0
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, -1431655766
ret i32 %a1
}
define i64 @muli64_0xaaaaaaaa(i64 %a) nounwind {
; RV32I-LABEL: muli64_0xaaaaaaaa:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 699051
; RV32I-NEXT: addi a2, a2, -1366
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0xaaaaaaaa:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a3, a0, 5
; RV64I-NEXT: slli a4, a0, 7
; RV64I-NEXT: slli a5, a0, 9
; RV64I-NEXT: slli a6, a0, 11
; RV64I-NEXT: add a1, a2, a1
; RV64I-NEXT: slli a2, a0, 15
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 17
; RV64I-NEXT: add a5, a5, a6
; RV64I-NEXT: slli a6, a0, 23
; RV64I-NEXT: add a2, a2, a4
; RV64I-NEXT: slli a4, a0, 25
; RV64I-NEXT: add a4, a6, a4
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: slli a3, a0, 13
; RV64I-NEXT: add a3, a5, a3
; RV64I-NEXT: slli a5, a0, 19
; RV64I-NEXT: add a2, a2, a5
; RV64I-NEXT: slli a5, a0, 27
; RV64I-NEXT: add a4, a4, a5
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: slli a3, a0, 21
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: slli a3, a0, 29
; RV64I-NEXT: add a3, a4, a3
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: add a0, a3, a0
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 2863311530
ret i64 %a1
}
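; 0x0fffffff = (1 << 28) - 1, so a single shift and subtract suffice.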
define i32 @muli32_0x0fffffff(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x0fffffff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 28
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x0fffffff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 28
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 268435455
ret i32 %a1
}
define i64 @muli64_0x0fffffff(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x0fffffff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 28
; RV32I-NEXT: srli a3, a0, 4
; RV32I-NEXT: slli a4, a1, 28
; RV32I-NEXT: sltu a5, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x0fffffff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 28
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 268435455
ret i64 %a1
}
define i32 @muli32_0xf0f0f0f0(i32 %a) nounwind {
; RV32I-LABEL: muli32_0xf0f0f0f0:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 4
; RV32I-NEXT: slli a2, a0, 8
; RV32I-NEXT: slli a3, a0, 12
; RV32I-NEXT: slli a4, a0, 16
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: slli a1, a0, 20
; RV32I-NEXT: sub a3, a3, a4
; RV32I-NEXT: slli a4, a0, 24
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: slli a0, a0, 28
; RV32I-NEXT: sub a2, a2, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0xf0f0f0f0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 4
; RV64I-NEXT: slli a2, a0, 8
; RV64I-NEXT: slli a3, a0, 12
; RV64I-NEXT: slli a4, a0, 16
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 20
; RV64I-NEXT: sub a3, a3, a4
; RV64I-NEXT: slli a4, a0, 24
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: slli a0, a0, 28
; RV64I-NEXT: sub a2, a2, a3
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, -252645136
ret i32 %a1
}
define i64 @muli64_0xf0f0f0f0(i64 %a) nounwind {
; RV32I-LABEL: muli64_0xf0f0f0f0:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 986895
; RV32I-NEXT: addi a2, a2, 240
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0xf0f0f0f0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 4
; RV64I-NEXT: slli a2, a0, 8
; RV64I-NEXT: slli a3, a0, 12
; RV64I-NEXT: slli a4, a0, 16
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 20
; RV64I-NEXT: sub a3, a3, a4
; RV64I-NEXT: slli a4, a0, 24
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a2, a2, a3
; RV64I-NEXT: slli a3, a0, 28
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: add a0, a2, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 4042322160
ret i64 %a1
}
define i32 @muli32_0xf7f7f7f7(i32 %a) nounwind {
; RV32I-LABEL: muli32_0xf7f7f7f7:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 3
; RV32I-NEXT: slli a2, a0, 11
; RV32I-NEXT: slli a3, a0, 19
; RV32I-NEXT: add a1, a0, a1
; RV32I-NEXT: slli a0, a0, 27
; RV32I-NEXT: add a2, a2, a3
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0xf7f7f7f7:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 11
; RV64I-NEXT: slli a3, a0, 19
; RV64I-NEXT: add a1, a0, a1
; RV64I-NEXT: slli a0, a0, 27
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
%a1 = mul i32 %a, -134744073
ret i32 %a1
}
define i64 @muli64_0xf7f7f7f7(i64 %a) nounwind {
; RV32I-LABEL: muli64_0xf7f7f7f7:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 1015679
; RV32I-NEXT: addi a2, a2, 2039
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0xf7f7f7f7:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 11
; RV64I-NEXT: slli a3, a0, 19
; RV64I-NEXT: add a2, a2, a3
; RV64I-NEXT: slli a3, a0, 27
; RV64I-NEXT: add a1, a0, a1
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: sub a0, a0, a3
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
%a1 = mul i64 %a, 4160223223
ret i64 %a1
}
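; 0x1000 is a power of two, so this is a single shift.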
define i32 @muli32_0x1000(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x1000:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 12
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x1000:
; RV64I: # %bb.0:
; RV64I-NEXT: slliw a0, a0, 12
; RV64I-NEXT: ret
%a1 = mul i32 %a, 4096
ret i32 %a1
}
define i64 @muli64_0x1000(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x1000:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a2, a0, 20
; RV32I-NEXT: slli a1, a1, 12
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: slli a0, a0, 12
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x1000:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: ret
%a1 = mul i64 %a, 4096
ret i64 %a1
}
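; 0x101 = (1 << 8) + 1: a shift and an add.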
define i32 @muli32_0x101(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x101:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 8
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x101:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 257
ret i32 %a1
}
define i64 @muli64_0x101(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x101:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 8
; RV32I-NEXT: srli a3, a0, 24
; RV32I-NEXT: slli a4, a1, 8
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sltu a2, a0, a2
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x101:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 257
ret i64 %a1
}
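; 0xfff = (1 << 12) - 1: a shift and a subtract.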
define i32 @muli32_0xfff(i32 %a) nounwind {
; RV32I-LABEL: muli32_0xfff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 12
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0xfff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 4095
ret i32 %a1
}
define i64 @muli64_0xfff(i64 %a) nounwind {
; RV32I-LABEL: muli64_0xfff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 12
; RV32I-NEXT: srli a3, a0, 20
; RV32I-NEXT: slli a4, a1, 12
; RV32I-NEXT: sltu a5, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0xfff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 4095
ret i64 %a1
}
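; 0x7fffffff = (1 << 31) - 1: a shift and a subtract.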
define i32 @muli32_0x7fffffff(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x7fffffff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 31
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x7fffffff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 31
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, 2147483647
ret i32 %a1
}
define i64 @muli64_0x7fffffff(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x7fffffff:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 31
; RV32I-NEXT: srli a3, a0, 1
; RV32I-NEXT: slli a4, a1, 31
; RV32I-NEXT: sltu a5, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x7fffffff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 31
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i64 %a, 2147483647
ret i64 %a1
}
define i32 @muli32_0xdeadbeef(i32 %a) nounwind {
; RV32I-LABEL: muli32_0xdeadbeef:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 8
; RV32I-NEXT: slli a2, a0, 14
; RV32I-NEXT: slli a3, a0, 17
; RV32I-NEXT: slli a4, a0, 20
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: slli a2, a0, 24
; RV32I-NEXT: add a3, a3, a4
; RV32I-NEXT: slli a4, a0, 29
; RV32I-NEXT: add a2, a2, a4
; RV32I-NEXT: slli a4, a0, 4
; RV32I-NEXT: add a4, a0, a4
; RV32I-NEXT: add a1, a4, a1
; RV32I-NEXT: slli a0, a0, 22
; RV32I-NEXT: add a0, a3, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: neg a1, a2
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0xdeadbeef:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a2, a0, 14
; RV64I-NEXT: slli a3, a0, 17
; RV64I-NEXT: slli a4, a0, 20
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a0, 24
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 29
; RV64I-NEXT: add a2, a2, a4
; RV64I-NEXT: slli a4, a0, 4
; RV64I-NEXT: add a4, a0, a4
; RV64I-NEXT: add a1, a4, a1
; RV64I-NEXT: slli a0, a0, 22
; RV64I-NEXT: add a0, a3, a0
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: neg a1, a2
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
%a1 = mul i32 %a, -559038737
ret i32 %a1
}
define i64 @muli64_0xdeadbeef(i64 %a) nounwind {
; RV32I-LABEL: muli64_0xdeadbeef:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 912092
; RV32I-NEXT: addi a2, a2, -273
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0xdeadbeef:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a2, a0, 14
; RV64I-NEXT: slli a3, a0, 17
; RV64I-NEXT: slli a4, a0, 20
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a0, 24
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 29
; RV64I-NEXT: add a2, a2, a4
; RV64I-NEXT: slli a4, a0, 4
; RV64I-NEXT: add a4, a0, a4
; RV64I-NEXT: add a1, a4, a1
; RV64I-NEXT: slli a4, a0, 22
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
%a1 = mul i64 %a, 3735928559
ret i64 %a1
}
define i32 @muli32_0x12345678(i32 %a) nounwind {
; RV32I-LABEL: muli32_0x12345678:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 3
; RV32I-NEXT: slli a2, a0, 7
; RV32I-NEXT: slli a3, a0, 9
; RV32I-NEXT: slli a4, a0, 11
; RV32I-NEXT: slli a5, a0, 13
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: slli a1, a0, 15
; RV32I-NEXT: add a3, a3, a4
; RV32I-NEXT: slli a4, a0, 20
; RV32I-NEXT: sub a5, a5, a1
; RV32I-NEXT: slli a1, a0, 22
; RV32I-NEXT: sub a4, a4, a1
; RV32I-NEXT: sub a2, a2, a3
; RV32I-NEXT: slli a1, a0, 18
; RV32I-NEXT: sub a5, a5, a1
; RV32I-NEXT: slli a1, a0, 25
; RV32I-NEXT: sub a4, a4, a1
; RV32I-NEXT: slli a0, a0, 28
; RV32I-NEXT: sub a2, a2, a5
; RV32I-NEXT: sub a4, a4, a0
; RV32I-NEXT: sub a0, a2, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli32_0x12345678:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 7
; RV64I-NEXT: slli a3, a0, 9
; RV64I-NEXT: slli a4, a0, 11
; RV64I-NEXT: slli a5, a0, 13
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 15
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 20
; RV64I-NEXT: sub a5, a5, a1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: sub a2, a2, a3
; RV64I-NEXT: slli a1, a0, 18
; RV64I-NEXT: sub a5, a5, a1
; RV64I-NEXT: slli a1, a0, 25
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: slli a0, a0, 28
; RV64I-NEXT: sub a2, a2, a5
; RV64I-NEXT: sub a4, a4, a0
; RV64I-NEXT: sub a0, a2, a4
; RV64I-NEXT: ret
%a1 = mul i32 %a, 305419896
ret i32 %a1
}
define i64 @muli64_0x12345678(i64 %a) nounwind {
; RV32I-LABEL: muli64_0x12345678:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a2, 74565
; RV32I-NEXT: addi a2, a2, 1656
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: muli64_0x12345678:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a2, a0, 7
; RV64I-NEXT: slli a3, a0, 9
; RV64I-NEXT: slli a4, a0, 11
; RV64I-NEXT: slli a5, a0, 13
; RV64I-NEXT: sub a2, a2, a1
; RV64I-NEXT: slli a1, a0, 15
; RV64I-NEXT: add a3, a3, a4
; RV64I-NEXT: slli a4, a0, 20
; RV64I-NEXT: sub a5, a5, a1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: sub a2, a2, a3
; RV64I-NEXT: slli a1, a0, 18
; RV64I-NEXT: sub a5, a5, a1
; RV64I-NEXT: slli a1, a0, 25
; RV64I-NEXT: sub a4, a4, a1
; RV64I-NEXT: slli a0, a0, 28
; RV64I-NEXT: sub a2, a2, a5
; RV64I-NEXT: sub a4, a4, a0
; RV64I-NEXT: sub a0, a2, a4
; RV64I-NEXT: ret
%a1 = mul i64 %a, 305419896
ret i64 %a1
}