; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=AARCH
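
; The i128 unsigned multiply-with-overflow intrinsic is expanded inline
; (mul/umulh plus ccmp-based flag checks) rather than being lowered to a
; runtime libcall, as the checks below show.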
define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
; AARCH-LABEL: muloti_test:
; AARCH: // %bb.0: // %start
; AARCH-NEXT: mul x9, x3, x0
; AARCH-NEXT: cmp x1, #0
; AARCH-NEXT: ccmp x3, #0, #4, ne
; AARCH-NEXT: umulh x8, x1, x2
; AARCH-NEXT: umulh x10, x3, x0
; AARCH-NEXT: madd x9, x1, x2, x9
; AARCH-NEXT: ccmp xzr, x8, #0, eq
; AARCH-NEXT: umulh x11, x0, x2
; AARCH-NEXT: ccmp xzr, x10, #0, eq
; AARCH-NEXT: mul x0, x0, x2
; AARCH-NEXT: cset w8, ne
; AARCH-NEXT: adds x1, x11, x9
; AARCH-NEXT: csinc w2, w8, wzr, lo
; AARCH-NEXT: ret
start:
  %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
  %1 = extractvalue { i128, i1 } %0, 0
  %2 = extractvalue { i128, i1 } %0, 1
  %3 = zext i1 %2 to i8
  %4 = insertvalue { i128, i8 } undef, i128 %1, 0
  %5 = insertvalue { i128, i8 } %4, i8 %3, 1
  ret { i128, i8 } %5
}

; PR56403
; We avoid lowering the intrinsic as a libcall because this function has the
; same name as the libcall we would otherwise generate; calling it would
; create an infinite loop.
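;
; For reference, compiler-rt's __muloti4 has the C-level shape sketched below.
; The body is an illustrative assumption, not the actual compiler-rt source;
; what matters here is the signature (two __int128 operands plus an int*
; overflow flag). Lowering the overflow intrinsic to a __muloti4 libcall while
; compiling such a function would emit a call to the function being compiled,
; so the intrinsic must be expanded inline instead.
;
;   __int128 __muloti4(__int128 a, __int128 b, int *overflow) {
;     __int128 result;
;     *overflow = __builtin_mul_overflow(a, b, &result) ? 1 : 0;
;     return result;
;   }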
define i128 @__muloti4(i128 %0, i128 %1, ptr nocapture nonnull writeonly align 4 %2) #2 {
; AARCH-LABEL: __muloti4:
; AARCH: // %bb.0: // %Entry
; AARCH-NEXT: asr x10, x1, #63
; AARCH-NEXT: asr x9, x3, #63
; AARCH-NEXT: umulh x14, x0, x2
; AARCH-NEXT: mov x8, x1
; AARCH-NEXT: str wzr, [x4]
; AARCH-NEXT: mul x12, x2, x10
; AARCH-NEXT: umulh x13, x2, x10
; AARCH-NEXT: umulh x11, x9, x0
; AARCH-NEXT: mul x15, x1, x2
; AARCH-NEXT: add x13, x13, x12
; AARCH-NEXT: madd x11, x9, x1, x11
; AARCH-NEXT: mul x9, x9, x0
; AARCH-NEXT: madd x10, x3, x10, x13
; AARCH-NEXT: umulh x13, x1, x2
; AARCH-NEXT: add x11, x11, x9
; AARCH-NEXT: adds x9, x9, x12
; AARCH-NEXT: mul x16, x0, x3
; AARCH-NEXT: adc x10, x11, x10
; AARCH-NEXT: adds x11, x15, x14
; AARCH-NEXT: umulh x17, x0, x3
; AARCH-NEXT: cinc x13, x13, hs
; AARCH-NEXT: mul x12, x1, x3
; AARCH-NEXT: adds x1, x16, x11
; AARCH-NEXT: umulh x11, x8, x3
; AARCH-NEXT: cinc x14, x17, hs
; AARCH-NEXT: adds x13, x13, x14
; AARCH-NEXT: mul x0, x0, x2
; AARCH-NEXT: cset w14, hs
; AARCH-NEXT: adds x12, x12, x13
; AARCH-NEXT: asr x13, x1, #63
; AARCH-NEXT: adc x11, x11, x14
; AARCH-NEXT: adds x9, x12, x9
; AARCH-NEXT: adc x10, x11, x10
; AARCH-NEXT: cmp x9, x13
; AARCH-NEXT: ccmp x10, x13, #0, eq
; AARCH-NEXT: cset w9, ne
; AARCH-NEXT: tbz x8, #63, .LBB1_2
; AARCH-NEXT: // %bb.1: // %Entry
; AARCH-NEXT: eor x8, x3, #0x8000000000000000
; AARCH-NEXT: orr x8, x2, x8
; AARCH-NEXT: cbz x8, .LBB1_3
; AARCH-NEXT: .LBB1_2: // %Else2
; AARCH-NEXT: cbz w9, .LBB1_4
; AARCH-NEXT: .LBB1_3: // %Then7
; AARCH-NEXT: mov w8, #1 // =0x1
; AARCH-NEXT: str w8, [x4]
; AARCH-NEXT: .LBB1_4: // %Block9
; AARCH-NEXT: ret
Entry:
  store i32 0, ptr %2, align 4
  %.fr = freeze i128 %1
  %mul = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %0, i128 %.fr)
  %3 = icmp slt i128 %0, 0
  %4 = icmp eq i128 %.fr, -170141183460469231731687303715884105728
  %5 = and i1 %3, %4
  br i1 %5, label %Then7, label %Else2

Else2:                                            ; preds = %Entry
  %mul.ov = extractvalue { i128, i1 } %mul, 1
  br i1 %mul.ov, label %Then7, label %Block9

Then7:                                            ; preds = %Else2, %Entry
  store i32 1, ptr %2, align 4
  br label %Block9

Block9:                                           ; preds = %Else2, %Then7
  %mul.val = extractvalue { i128, i1 } %mul, 0
  ret i128 %mul.val
}

declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1
declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128) #1

attributes #0 = { nounwind readnone uwtable }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { nounwind }