; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zba -verify-machineinstrs | FileCheck %s --check-prefix=RV32ZBA
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zba -verify-machineinstrs | FileCheck %s --check-prefix=RV64ZBA
;
; Get the actual value of the overflow bit.
;
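; In the RV32 checks below, signed i32 addition overflows iff the sign of the
; second operand disagrees with whether the sum wrapped below the first:
; (v2 < 0) xor (v1 + v2 <s v1), i.e. the slti/slt/xor sequence. On RV64 both
; operands are sign-extended and the exact 64-bit add is compared against the
; wrapping 32-bit addw; any difference signals overflow.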
define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: saddo1.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a3, a0, a1
; RV32-NEXT: slt a0, a3, a0
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: sw a3, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo1.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: add a3, a0, a1
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo1.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a3, a0, a1
; RV32ZBA-NEXT: slt a0, a3, a0
; RV32ZBA-NEXT: slti a1, a1, 0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: sw a3, 0(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo1.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: add a3, a0, a1
; RV64ZBA-NEXT: addw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a3
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a3, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
; Test the immediate version.
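; With a positive constant addend the (imm < 0) term constant-folds to 0, so
; the RV32 check reduces to a single slt of the sum against the original value.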
define zeroext i1 @saddo2.i32(i32 %v1, i32* %res) {
; RV32-LABEL: saddo2.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a2, a0, 4
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo2.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a2, a0, 4
; RV64-NEXT: addiw a0, a0, 4
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo2.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a2, a0, 4
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: sw a2, 0(a1)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo2.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: addi a2, a0, 4
; RV64ZBA-NEXT: addiw a0, a0, 4
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
; Test negative immediates.
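; With a negative constant the (imm < 0) term folds to 1 instead, so the slt
; result is inverted with xori 1.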
define zeroext i1 @saddo3.i32(i32 %v1, i32* %res) {
; RV32-LABEL: saddo3.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a2, a0, -4
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo3.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a2, a0, -4
; RV64-NEXT: addiw a0, a0, -4
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo3.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a2, a0, -4
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: sw a2, 0(a1)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo3.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: addi a2, a0, -4
; RV64ZBA-NEXT: addiw a0, a0, -4
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
; Test immediates that are too large to be encoded.
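; 16777215 (0xffffff) does not fit in addi's signed 12-bit immediate, so it is
; materialized as (4096 << 12) - 1 via lui+addi(w) and the register-register
; overflow pattern is used.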
define zeroext i1 @saddo4.i32(i32 %v1, i32* %res) {
; RV32-LABEL: saddo4.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a2, 4096
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo4.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: lui a2, 4096
; RV64-NEXT: addiw a2, a2, -1
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: addw a0, a0, a2
; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo4.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: lui a2, 4096
; RV32ZBA-NEXT: addi a2, a2, -1
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: sw a2, 0(a1)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo4.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: lui a2, 4096
; RV64ZBA-NEXT: addiw a2, a2, -1
; RV64ZBA-NEXT: add a3, a0, a2
; RV64ZBA-NEXT: addw a0, a0, a2
; RV64ZBA-NEXT: xor a0, a0, a3
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a3, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
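; On RV32 the i64 addition is split into two 32-bit halves with the carry
; computed by sltu. Overflow is then the classic sign test on the high words:
; it occurs iff the operands agree in sign but the result does not, i.e.
; (~(v1 ^ v2) & (v1 ^ sum)) is negative, hence the xor/not/and/slti sequence.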
define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: saddo1.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a5, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a5, a5, a0
; RV32-NEXT: xor a0, a1, a5
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: not a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: sw a2, 0(a4)
; RV32-NEXT: sw a5, 4(a4)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo1.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a3, a0, a1
; RV64-NEXT: slt a0, a3, a0
; RV64-NEXT: slti a1, a1, 0
; RV64-NEXT: xor a0, a1, a0
; RV64-NEXT: sd a3, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo1.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a5, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a5, a5, a0
; RV32ZBA-NEXT: xor a0, a1, a5
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: not a1, a1
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: slti a0, a0, 0
; RV32ZBA-NEXT: sw a2, 0(a4)
; RV32ZBA-NEXT: sw a5, 4(a4)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo1.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a3, a0, a1
; RV64ZBA-NEXT: slt a0, a3, a0
; RV64ZBA-NEXT: slti a1, a1, 0
; RV64ZBA-NEXT: xor a0, a1, a0
; RV64ZBA-NEXT: sd a3, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
define zeroext i1 @saddo2.i64(i64 %v1, i64* %res) {
; RV32-LABEL: saddo2.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a0, 4
; RV32-NEXT: sltu a0, a3, a0
; RV32-NEXT: add a4, a1, a0
; RV32-NEXT: xor a0, a1, a4
; RV32-NEXT: not a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: sw a3, 0(a2)
; RV32-NEXT: sw a4, 4(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo2.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a2, a0, 4
; RV64-NEXT: slt a0, a2, a0
; RV64-NEXT: sd a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo2.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a3, a0, 4
; RV32ZBA-NEXT: sltu a0, a3, a0
; RV32ZBA-NEXT: add a4, a1, a0
; RV32ZBA-NEXT: xor a0, a1, a4
; RV32ZBA-NEXT: not a1, a1
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: slti a0, a0, 0
; RV32ZBA-NEXT: sw a3, 0(a2)
; RV32ZBA-NEXT: sw a4, 4(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo2.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addi a2, a0, 4
; RV64ZBA-NEXT: slt a0, a2, a0
; RV64ZBA-NEXT: sd a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
define zeroext i1 @saddo3.i64(i64 %v1, i64* %res) {
; RV32-LABEL: saddo3.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a0, -4
; RV32-NEXT: sltu a0, a3, a0
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: addi a4, a0, -1
; RV32-NEXT: xor a0, a1, a4
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: sw a3, 0(a2)
; RV32-NEXT: sw a4, 4(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: saddo3.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a2, a0, -4
; RV64-NEXT: slt a0, a2, a0
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: sd a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo3.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a3, a0, -4
; RV32ZBA-NEXT: sltu a0, a3, a0
; RV32ZBA-NEXT: add a0, a1, a0
; RV32ZBA-NEXT: addi a4, a0, -1
; RV32ZBA-NEXT: xor a0, a1, a4
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: slti a0, a0, 0
; RV32ZBA-NEXT: sw a3, 0(a2)
; RV32ZBA-NEXT: sw a4, 4(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo3.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addi a2, a0, -4
; RV64ZBA-NEXT: slt a0, a2, a0
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: sd a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
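; Unsigned addition overflows iff the sum wraps below an operand, so a single
; sltu suffices. For i32 on RV64 the addw result is compared against the
; sign-extended first operand.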
define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: uaddo.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a1, a0, a1
; RV32-NEXT: sltu a0, a1, a0
; RV32-NEXT: sw a1, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addw a3, a0, a1
; RV64-NEXT: sext.w a4, a0
; RV64-NEXT: sltu a3, a3, a4
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: sw a0, 0(a2)
; RV64-NEXT: mv a0, a3
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a1, a0, a1
; RV32ZBA-NEXT: sltu a0, a1, a0
; RV32ZBA-NEXT: sw a1, 0(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addw a3, a0, a1
; RV64ZBA-NEXT: sext.w a4, a0
; RV64ZBA-NEXT: sltu a3, a3, a4
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sw a0, 0(a2)
; RV64ZBA-NEXT: mv a0, a3
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: uaddo.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a3, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a3, a3, a0
; RV32-NEXT: beq a3, a1, .LBB8_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a3, a1
; RV32-NEXT: .LBB8_2: # %entry
; RV32-NEXT: sw a2, 0(a4)
; RV32-NEXT: sw a3, 4(a4)
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a1, a0, a1
; RV64-NEXT: sltu a0, a1, a0
; RV64-NEXT: sd a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a3, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a3, a3, a0
; RV32ZBA-NEXT: beq a3, a1, .LBB8_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a3, a1
; RV32ZBA-NEXT: .LBB8_2: # %entry
; RV32ZBA-NEXT: sw a2, 0(a4)
; RV32ZBA-NEXT: sw a3, 4(a4)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a1, a0, a1
; RV64ZBA-NEXT: sltu a0, a1, a0
; RV64ZBA-NEXT: sd a1, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
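; Signed subtraction mirrors the addition idiom: overflow iff
; (v2 > 0) xor (v1 - v2 <s v1), emitted as sgtz/slt/xor on RV32 and as a
; compare of the 64-bit sub against subw on sign-extended operands on RV64.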
define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: ssubo1.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sgtz a3, a1
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: slt a0, a1, a0
; RV32-NEXT: xor a0, a3, a0
; RV32-NEXT: sw a1, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo1.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sub a3, a0, a1
; RV64-NEXT: subw a0, a0, a1
; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo1.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sgtz a3, a1
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: slt a0, a1, a0
; RV32ZBA-NEXT: xor a0, a3, a0
; RV32ZBA-NEXT: sw a1, 0(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo1.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: sub a3, a0, a1
; RV64ZBA-NEXT: subw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a3
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a3, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @ssubo2.i32(i32 %v1, i32* %res) {
; RV32-LABEL: ssubo2.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a2, a0, 4
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo2.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a2, a0, 4
; RV64-NEXT: addiw a0, a0, 4
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo2.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a2, a0, 4
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: sw a2, 0(a1)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo2.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: addi a2, a0, 4
; RV64ZBA-NEXT: addiw a0, a0, 4
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: ssubo.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a6, a0, a2
; RV32-NEXT: sub a5, a1, a3
; RV32-NEXT: sub a5, a5, a6
; RV32-NEXT: xor a6, a1, a5
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: and a1, a1, a6
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: sub a0, a0, a2
; RV32-NEXT: sw a0, 0(a4)
; RV32-NEXT: sw a5, 4(a4)
; RV32-NEXT: mv a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sgtz a3, a1
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: slt a0, a1, a0
; RV64-NEXT: xor a0, a3, a0
; RV64-NEXT: sd a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a6, a0, a2
; RV32ZBA-NEXT: sub a5, a1, a3
; RV32ZBA-NEXT: sub a5, a5, a6
; RV32ZBA-NEXT: xor a6, a1, a5
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: and a1, a1, a6
; RV32ZBA-NEXT: slti a1, a1, 0
; RV32ZBA-NEXT: sub a0, a0, a2
; RV32ZBA-NEXT: sw a0, 0(a4)
; RV32ZBA-NEXT: sw a5, 4(a4)
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sgtz a3, a1
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: slt a0, a1, a0
; RV64ZBA-NEXT: xor a0, a3, a0
; RV64ZBA-NEXT: sd a1, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
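; Unsigned subtraction overflows exactly when it borrows, i.e. when the
; difference is (unsigned) greater than the minuend: sltu v1, (v1 - v2).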
define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: usubo.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: sw a1, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: subw a3, a0, a1
; RV64-NEXT: sext.w a4, a0
; RV64-NEXT: sltu a3, a4, a3
; RV64-NEXT: sub a0, a0, a1
; RV64-NEXT: sw a0, 0(a2)
; RV64-NEXT: mv a0, a3
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: sltu a0, a0, a1
; RV32ZBA-NEXT: sw a1, 0(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: subw a3, a0, a1
; RV64ZBA-NEXT: sext.w a4, a0
; RV64ZBA-NEXT: sltu a3, a4, a3
; RV64ZBA-NEXT: sub a0, a0, a1
; RV64ZBA-NEXT: sw a0, 0(a2)
; RV64ZBA-NEXT: mv a0, a3
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: usubo.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a5, a0, a2
; RV32-NEXT: sub a3, a1, a3
; RV32-NEXT: sub a3, a3, a5
; RV32-NEXT: sub a2, a0, a2
; RV32-NEXT: beq a3, a1, .LBB13_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a1, a3
; RV32-NEXT: j .LBB13_3
; RV32-NEXT: .LBB13_2:
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: .LBB13_3: # %entry
; RV32-NEXT: sw a2, 0(a4)
; RV32-NEXT: sw a3, 4(a4)
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: sd a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a5, a0, a2
; RV32ZBA-NEXT: sub a3, a1, a3
; RV32ZBA-NEXT: sub a3, a3, a5
; RV32ZBA-NEXT: sub a2, a0, a2
; RV32ZBA-NEXT: beq a3, a1, .LBB13_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a1, a3
; RV32ZBA-NEXT: j .LBB13_3
; RV32ZBA-NEXT: .LBB13_2:
; RV32ZBA-NEXT: sltu a0, a0, a2
; RV32ZBA-NEXT: .LBB13_3: # %entry
; RV32ZBA-NEXT: sw a2, 0(a4)
; RV32ZBA-NEXT: sw a3, 4(a4)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: sltu a0, a0, a1
; RV64ZBA-NEXT: sd a1, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
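; Signed multiplication overflows iff the high half of the full product is
; not the sign-extension of the low half: RV32 compares mulh against the low
; product shifted arithmetically right by 31, and RV64 compares mul against
; mulw on sign-extended operands.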
define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: smulo.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulh a3, a0, a1
; RV32-NEXT: mul a1, a0, a1
; RV32-NEXT: srai a0, a1, 31
; RV32-NEXT: xor a0, a3, a0
; RV32-NEXT: snez a0, a0
; RV32-NEXT: sw a1, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a3, a0, a1
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulh a3, a0, a1
; RV32ZBA-NEXT: mul a1, a0, a1
; RV32ZBA-NEXT: srai a0, a1, 31
; RV32ZBA-NEXT: xor a0, a3, a0
; RV32ZBA-NEXT: snez a0, a0
; RV32ZBA-NEXT: sw a1, 0(a2)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: mul a3, a0, a1
; RV64ZBA-NEXT: mulw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a3
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a3, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
; RV32-LABEL: smulo2.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a2, zero, 13
; RV32-NEXT: mulh a3, a0, a2
; RV32-NEXT: mul a2, a0, a2
; RV32-NEXT: srai a0, a2, 31
; RV32-NEXT: xor a0, a3, a0
; RV32-NEXT: snez a0, a0
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: smulo2.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a2, zero, 13
; RV64-NEXT: mul a3, a0, a2
; RV64-NEXT: mulw a0, a0, a2
; RV64-NEXT: xor a0, a0, a3
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a3, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo2.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a2, zero, 13
; RV32ZBA-NEXT: mulh a3, a0, a2
; RV32ZBA-NEXT: mul a2, a0, a2
; RV32ZBA-NEXT: srai a0, a2, 31
; RV32ZBA-NEXT: xor a0, a3, a0
; RV32ZBA-NEXT: snez a0, a0
; RV32ZBA-NEXT: sw a2, 0(a1)
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo2.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: addi a2, zero, 13
; RV64ZBA-NEXT: mul a3, a0, a2
; RV64ZBA-NEXT: mulw a0, a0, a2
; RV64ZBA-NEXT: xor a0, a0, a3
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a3, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
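; RV32 cannot form the full 64x64 signed product inline, so i64 smulo is
; lowered to a libcall to __mulodi4, which reports overflow through the
; pointer argument passed in a4 (a stack slot here).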
define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: smulo.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: mv s0, a4
; RV32-NEXT: sw zero, 4(sp)
; RV32-NEXT: addi a4, sp, 4
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a2, 4(sp)
; RV32-NEXT: snez a2, a2
; RV32-NEXT: sw a1, 4(s0)
; RV32-NEXT: sw a0, 0(s0)
; RV32-NEXT: mv a0, a2
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulh a3, a0, a1
; RV64-NEXT: mul a1, a0, a1
; RV64-NEXT: srai a0, a1, 63
; RV64-NEXT: xor a0, a3, a0
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sd a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -16
; RV32ZBA-NEXT: .cfi_def_cfa_offset 16
; RV32ZBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: .cfi_offset s0, -8
; RV32ZBA-NEXT: mv s0, a4
; RV32ZBA-NEXT: sw zero, 4(sp)
; RV32ZBA-NEXT: addi a4, sp, 4
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a2, 4(sp)
; RV32ZBA-NEXT: snez a2, a2
; RV32ZBA-NEXT: sw a1, 4(s0)
; RV32ZBA-NEXT: sw a0, 0(s0)
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 16
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulh a3, a0, a1
; RV64ZBA-NEXT: mul a1, a0, a1
; RV64ZBA-NEXT: srai a0, a1, 63
; RV64ZBA-NEXT: xor a0, a3, a0
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sd a1, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
; RV32-LABEL: smulo2.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: mv s0, a2
; RV32-NEXT: sw zero, 4(sp)
; RV32-NEXT: addi a2, zero, 13
; RV32-NEXT: addi a4, sp, 4
; RV32-NEXT: mv a3, zero
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a2, 4(sp)
; RV32-NEXT: snez a2, a2
; RV32-NEXT: sw a1, 4(s0)
; RV32-NEXT: sw a0, 0(s0)
; RV32-NEXT: mv a0, a2
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: smulo2.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a2, zero, 13
; RV64-NEXT: mulh a3, a0, a2
; RV64-NEXT: mul a2, a0, a2
; RV64-NEXT: srai a0, a2, 63
; RV64-NEXT: xor a0, a3, a0
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sd a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo2.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -16
; RV32ZBA-NEXT: .cfi_def_cfa_offset 16
; RV32ZBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: .cfi_offset s0, -8
; RV32ZBA-NEXT: mv s0, a2
; RV32ZBA-NEXT: sw zero, 4(sp)
; RV32ZBA-NEXT: addi a2, zero, 13
; RV32ZBA-NEXT: addi a4, sp, 4
; RV32ZBA-NEXT: mv a3, zero
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a2, 4(sp)
; RV32ZBA-NEXT: snez a2, a2
; RV32ZBA-NEXT: sw a1, 4(s0)
; RV32ZBA-NEXT: sw a0, 0(s0)
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 16
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo2.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addi a2, zero, 13
; RV64ZBA-NEXT: mulh a3, a0, a2
; RV64ZBA-NEXT: mul a2, a0, a2
; RV64ZBA-NEXT: srai a0, a2, 63
; RV64ZBA-NEXT: xor a0, a3, a0
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sd a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
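; Unsigned multiplication overflows iff the upper half of the full product is
; nonzero. RV32 checks mulhu with snez; RV64 forms the whole 32x32->64
; product in one register (slli+mulhu, or zext.w+mul with Zba) and tests its
; upper 32 bits.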
define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
; RV32-LABEL: umulo.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulhu a3, a0, a1
; RV32-NEXT: snez a3, a3
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sw a0, 0(a2)
; RV32-NEXT: mv a0, a3
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: slli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: mulhu a1, a0, a1
; RV64-NEXT: srli a0, a1, 32
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulhu a3, a0, a1
; RV32ZBA-NEXT: snez a3, a3
; RV32ZBA-NEXT: mul a0, a0, a1
; RV32ZBA-NEXT: sw a0, 0(a2)
; RV32ZBA-NEXT: mv a0, a3
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: zext.w a1, a1
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: mul a1, a0, a1
; RV64ZBA-NEXT: srli a0, a1, 32
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a1, 0(a2)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
; RV32-LABEL: umulo2.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, zero, 13
; RV32-NEXT: mulhu a2, a0, a3
; RV32-NEXT: snez a2, a2
; RV32-NEXT: mul a0, a0, a3
; RV32-NEXT: sw a0, 0(a1)
; RV32-NEXT: mv a0, a2
; RV32-NEXT: ret
;
; RV64-LABEL: umulo2.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: addi a2, zero, 13
; RV64-NEXT: mul a2, a0, a2
; RV64-NEXT: srli a0, a2, 32
; RV64-NEXT: snez a0, a0
; RV64-NEXT: sw a2, 0(a1)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo2.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a3, zero, 13
; RV32ZBA-NEXT: mulhu a2, a0, a3
; RV32ZBA-NEXT: snez a2, a2
; RV32ZBA-NEXT: mul a0, a0, a3
; RV32ZBA-NEXT: sw a0, 0(a1)
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo2.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: addi a2, zero, 13
; RV64ZBA-NEXT: mul a2, a0, a2
; RV64ZBA-NEXT: srli a0, a2, 32
; RV64ZBA-NEXT: snez a0, a0
; RV64ZBA-NEXT: sw a2, 0(a1)
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
store i32 %val, i32* %res
ret i1 %obit
}
; Similar to umulo.i32, but storing the overflow and returning the result.
define signext i32 @umulo3.i32(i32 signext %0, i32 signext %1, i32* %2) {
; RV32-LABEL: umulo3.i32:
; RV32: # %bb.0:
; RV32-NEXT: mul a3, a0, a1
; RV32-NEXT: mulhu a0, a0, a1
; RV32-NEXT: snez a0, a0
; RV32-NEXT: sw a0, 0(a2)
; RV32-NEXT: mv a0, a3
; RV32-NEXT: ret
;
; RV64-LABEL: umulo3.i32:
; RV64: # %bb.0:
; RV64-NEXT: slli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: mulhu a0, a0, a1
; RV64-NEXT: srli a1, a0, 32
; RV64-NEXT: snez a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sw a1, 0(a2)
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo3.i32:
; RV32ZBA: # %bb.0:
; RV32ZBA-NEXT: mul a3, a0, a1
; RV32ZBA-NEXT: mulhu a0, a0, a1
; RV32ZBA-NEXT: snez a0, a0
; RV32ZBA-NEXT: sw a0, 0(a2)
; RV32ZBA-NEXT: mv a0, a3
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo3.i32:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: zext.w a1, a1
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: mul a3, a0, a1
; RV64ZBA-NEXT: srli a3, a3, 32
; RV64ZBA-NEXT: snez a3, a3
; RV64ZBA-NEXT: mulw a0, a0, a1
; RV64ZBA-NEXT: sw a3, 0(a2)
; RV64ZBA-NEXT: ret
%4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1)
%5 = extractvalue { i32, i1 } %4, 1
%6 = extractvalue { i32, i1 } %4, 0
%7 = zext i1 %5 to i32
store i32 %7, i32* %2, align 4
ret i32 %6
}
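; For i64 umulo on RV32 the product is assembled from 32-bit partial
; products. Overflow is the OR of: both high words being nonzero (their
; product cannot fit), either cross-term mulhu being nonzero, and the carry
; out of the middle-word sum.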
define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
; RV32-LABEL: umulo.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mul a6, a3, a0
; RV32-NEXT: mul a5, a1, a2
; RV32-NEXT: add a6, a5, a6
; RV32-NEXT: mulhu a5, a0, a2
; RV32-NEXT: add a6, a5, a6
; RV32-NEXT: sltu a7, a6, a5
; RV32-NEXT: snez t0, a3
; RV32-NEXT: snez a5, a1
; RV32-NEXT: and a5, a5, t0
; RV32-NEXT: mulhu a1, a1, a2
; RV32-NEXT: snez a1, a1
; RV32-NEXT: or a1, a5, a1
; RV32-NEXT: mulhu a3, a3, a0
; RV32-NEXT: snez a3, a3
; RV32-NEXT: or a1, a1, a3
; RV32-NEXT: or a1, a1, a7
; RV32-NEXT: mul a0, a0, a2
; RV32-NEXT: sw a0, 0(a4)
; RV32-NEXT: sw a6, 4(a4)
; RV32-NEXT: mv a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulhu a3, a0, a1
; RV64-NEXT: snez a3, a3
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sd a0, 0(a2)
; RV64-NEXT: mv a0, a3
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mul a6, a3, a0
; RV32ZBA-NEXT: mul a5, a1, a2
; RV32ZBA-NEXT: add a6, a5, a6
; RV32ZBA-NEXT: mulhu a5, a0, a2
; RV32ZBA-NEXT: add a6, a5, a6
; RV32ZBA-NEXT: sltu a7, a6, a5
; RV32ZBA-NEXT: snez t0, a3
; RV32ZBA-NEXT: snez a5, a1
; RV32ZBA-NEXT: and a5, a5, t0
; RV32ZBA-NEXT: mulhu a1, a1, a2
; RV32ZBA-NEXT: snez a1, a1
; RV32ZBA-NEXT: or a1, a5, a1
; RV32ZBA-NEXT: mulhu a3, a3, a0
; RV32ZBA-NEXT: snez a3, a3
; RV32ZBA-NEXT: or a1, a1, a3
; RV32ZBA-NEXT: or a1, a1, a7
; RV32ZBA-NEXT: mul a0, a0, a2
; RV32ZBA-NEXT: sw a0, 0(a4)
; RV32ZBA-NEXT: sw a6, 4(a4)
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulhu a3, a0, a1
; RV64ZBA-NEXT: snez a3, a3
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: sd a0, 0(a2)
; RV64ZBA-NEXT: mv a0, a3
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
; RV32-LABEL: umulo2.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, zero, 13
; RV32-NEXT: mul a4, a1, a3
; RV32-NEXT: mulhu a5, a0, a3
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: sltu a5, a4, a5
; RV32-NEXT: mulhu a1, a1, a3
; RV32-NEXT: snez a1, a1
; RV32-NEXT: or a1, a1, a5
; RV32-NEXT: mul a0, a0, a3
; RV32-NEXT: sw a0, 0(a2)
; RV32-NEXT: sw a4, 4(a2)
; RV32-NEXT: mv a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo2.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a3, zero, 13
; RV64-NEXT: mulhu a2, a0, a3
; RV64-NEXT: snez a2, a2
; RV64-NEXT: mul a0, a0, a3
; RV64-NEXT: sd a0, 0(a1)
; RV64-NEXT: mv a0, a2
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo2.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi a3, zero, 13
; RV32ZBA-NEXT: mul a4, a1, a3
; RV32ZBA-NEXT: mulhu a5, a0, a3
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: sltu a5, a4, a5
; RV32ZBA-NEXT: mulhu a1, a1, a3
; RV32ZBA-NEXT: snez a1, a1
; RV32ZBA-NEXT: or a1, a1, a5
; RV32ZBA-NEXT: mul a0, a0, a3
; RV32ZBA-NEXT: sw a0, 0(a2)
; RV32ZBA-NEXT: sw a4, 4(a2)
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo2.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addi a3, zero, 13
; RV64ZBA-NEXT: mulhu a2, a0, a3
; RV64ZBA-NEXT: snez a2, a2
; RV64ZBA-NEXT: mul a0, a0, a3
; RV64ZBA-NEXT: sd a0, 0(a1)
; RV64ZBA-NEXT: mv a0, a2
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, i64* %res
ret i1 %obit
}
;
; Check the use of the overflow bit in combination with a select instruction.
;
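; When the overflow bit only feeds a select or a logical not, it is not
; materialized in a register: the compare is branched on directly
; (bne/bltu/bltz), and the xor-with-true in the *.not tests folds into an
; inverted setcc (seqz / xori 1).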
define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: saddo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a2, a0, a1
; RV32-NEXT: slt a2, a2, a0
; RV32-NEXT: slti a3, a1, 0
; RV32-NEXT: bne a3, a2, .LBB23_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB23_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a2, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: add a4, a3, a2
; RV64-NEXT: addw a2, a3, a2
; RV64-NEXT: bne a2, a4, .LBB23_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB23_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a2, a0, a1
; RV32ZBA-NEXT: slt a2, a2, a0
; RV32ZBA-NEXT: slti a3, a1, 0
; RV32ZBA-NEXT: bne a3, a2, .LBB23_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB23_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a2, a1
; RV64ZBA-NEXT: sext.w a3, a0
; RV64ZBA-NEXT: add a4, a3, a2
; RV64ZBA-NEXT: addw a2, a3, a2
; RV64ZBA-NEXT: bne a2, a4, .LBB23_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB23_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
define i1 @saddo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: saddo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a2, a0, a1
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a2, a0, a1
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: slti a1, a1, 0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: addw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: saddo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a4, a1, a3
; RV32-NEXT: add a5, a0, a2
; RV32-NEXT: sltu a5, a5, a0
; RV32-NEXT: add a4, a4, a5
; RV32-NEXT: xor a4, a1, a4
; RV32-NEXT: xor a5, a1, a3
; RV32-NEXT: not a5, a5
; RV32-NEXT: and a4, a5, a4
; RV32-NEXT: bltz a4, .LBB25_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a2
; RV32-NEXT: mv a1, a3
; RV32-NEXT: .LBB25_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: slt a2, a2, a0
; RV64-NEXT: slti a3, a1, 0
; RV64-NEXT: bne a3, a2, .LBB25_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB25_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a4, a1, a3
; RV32ZBA-NEXT: add a5, a0, a2
; RV32ZBA-NEXT: sltu a5, a5, a0
; RV32ZBA-NEXT: add a4, a4, a5
; RV32ZBA-NEXT: xor a4, a1, a4
; RV32ZBA-NEXT: xor a5, a1, a3
; RV32ZBA-NEXT: not a5, a5
; RV32ZBA-NEXT: and a4, a5, a4
; RV32ZBA-NEXT: bltz a4, .LBB25_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: mv a1, a3
; RV32ZBA-NEXT: .LBB25_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: slt a2, a2, a0
; RV64ZBA-NEXT: slti a3, a1, 0
; RV64ZBA-NEXT: bne a3, a2, .LBB25_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB25_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: saddo.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a4, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a0, a4, a0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: not a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: addi a1, zero, -1
; RV32-NEXT: slt a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: slt a0, a2, a0
; RV64-NEXT: slti a1, a1, 0
; RV64-NEXT: xor a0, a1, a0
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a4, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a0, a4, a0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: not a1, a1
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: addi a1, zero, -1
; RV32ZBA-NEXT: slt a0, a1, a0
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: slt a0, a2, a0
; RV64ZBA-NEXT: slti a1, a1, 0
; RV64ZBA-NEXT: xor a0, a1, a0
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: uaddo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a2, a0, a1
; RV32-NEXT: bltu a2, a0, .LBB27_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB27_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addw a2, a0, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: bltu a2, a3, .LBB27_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB27_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a2, a0, a1
; RV32ZBA-NEXT: bltu a2, a0, .LBB27_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB27_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addw a2, a0, a1
; RV64ZBA-NEXT: sext.w a3, a0
; RV64ZBA-NEXT: bltu a2, a3, .LBB27_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB27_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
define i1 @uaddo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: uaddo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a1, a0, a1
; RV32-NEXT: sltu a0, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addw a1, a0, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sltu a0, a1, a0
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a1, a0, a1
; RV32ZBA-NEXT: sltu a0, a1, a0
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addw a1, a0, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: sltu a0, a1, a0
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: uaddo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a5, a1, a3
; RV32-NEXT: add a4, a0, a2
; RV32-NEXT: sltu a4, a4, a0
; RV32-NEXT: add a5, a5, a4
; RV32-NEXT: bne a5, a1, .LBB29_3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: beqz a4, .LBB29_4
; RV32-NEXT: .LBB29_2: # %entry
; RV32-NEXT: ret
; RV32-NEXT: .LBB29_3: # %entry
; RV32-NEXT: sltu a4, a5, a1
; RV32-NEXT: bnez a4, .LBB29_2
; RV32-NEXT: .LBB29_4: # %entry
; RV32-NEXT: mv a0, a2
; RV32-NEXT: mv a1, a3
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: bltu a2, a0, .LBB29_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB29_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a5, a1, a3
; RV32ZBA-NEXT: add a4, a0, a2
; RV32ZBA-NEXT: sltu a4, a4, a0
; RV32ZBA-NEXT: add a5, a5, a4
; RV32ZBA-NEXT: bne a5, a1, .LBB29_3
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: beqz a4, .LBB29_4
; RV32ZBA-NEXT: .LBB29_2: # %entry
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB29_3: # %entry
; RV32ZBA-NEXT: sltu a4, a5, a1
; RV32ZBA-NEXT: bnez a4, .LBB29_2
; RV32ZBA-NEXT: .LBB29_4: # %entry
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: mv a1, a3
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: bltu a2, a0, .LBB29_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB29_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: uaddo.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a3, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a2, a3, a0
; RV32-NEXT: beq a2, a1, .LBB30_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: .LBB30_2: # %entry
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a1, a0, a1
; RV64-NEXT: sltu a0, a1, a0
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a3, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a2, a3, a0
; RV32ZBA-NEXT: beq a2, a1, .LBB30_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a2, a1
; RV32ZBA-NEXT: .LBB30_2: # %entry
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a1, a0, a1
; RV64ZBA-NEXT: sltu a0, a1, a0
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: ssubo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sgtz a2, a1
; RV32-NEXT: sub a3, a0, a1
; RV32-NEXT: slt a3, a3, a0
; RV32-NEXT: bne a2, a3, .LBB31_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB31_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a2, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: sub a4, a3, a2
; RV64-NEXT: subw a2, a3, a2
; RV64-NEXT: bne a2, a4, .LBB31_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB31_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sgtz a2, a1
; RV32ZBA-NEXT: sub a3, a0, a1
; RV32ZBA-NEXT: slt a3, a3, a0
; RV32ZBA-NEXT: bne a2, a3, .LBB31_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB31_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a2, a1
; RV64ZBA-NEXT: sext.w a3, a0
; RV64ZBA-NEXT: sub a4, a3, a2
; RV64ZBA-NEXT: subw a2, a3, a2
; RV64ZBA-NEXT: bne a2, a4, .LBB31_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB31_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
define i1 @ssubo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: ssubo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sgtz a2, a1
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: slt a0, a1, a0
; RV32-NEXT: xor a0, a2, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sub a2, a0, a1
; RV64-NEXT: subw a0, a0, a1
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sgtz a2, a1
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: slt a0, a1, a0
; RV32ZBA-NEXT: xor a0, a2, a0
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: sub a2, a0, a1
; RV64ZBA-NEXT: subw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: ssubo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a4, a0, a2
; RV32-NEXT: sub a5, a1, a3
; RV32-NEXT: sub a4, a5, a4
; RV32-NEXT: xor a4, a1, a4
; RV32-NEXT: xor a5, a1, a3
; RV32-NEXT: and a4, a5, a4
; RV32-NEXT: bltz a4, .LBB33_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a2
; RV32-NEXT: mv a1, a3
; RV32-NEXT: .LBB33_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sgtz a2, a1
; RV64-NEXT: sub a3, a0, a1
; RV64-NEXT: slt a3, a3, a0
; RV64-NEXT: bne a2, a3, .LBB33_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB33_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a4, a0, a2
; RV32ZBA-NEXT: sub a5, a1, a3
; RV32ZBA-NEXT: sub a4, a5, a4
; RV32ZBA-NEXT: xor a4, a1, a4
; RV32ZBA-NEXT: xor a5, a1, a3
; RV32ZBA-NEXT: and a4, a5, a4
; RV32ZBA-NEXT: bltz a4, .LBB33_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: mv a1, a3
; RV32ZBA-NEXT: .LBB33_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sgtz a2, a1
; RV64ZBA-NEXT: sub a3, a0, a1
; RV64ZBA-NEXT: slt a3, a3, a0
; RV64ZBA-NEXT: bne a2, a3, .LBB33_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB33_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: ssub.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: sub a2, a1, a3
; RV32-NEXT: sub a0, a2, a0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: addi a1, zero, -1
; RV32-NEXT: slt a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: ssub.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sgtz a2, a1
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: slt a0, a1, a0
; RV64-NEXT: xor a0, a2, a0
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssub.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a0, a0, a2
; RV32ZBA-NEXT: sub a2, a1, a3
; RV32ZBA-NEXT: sub a0, a2, a0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: addi a1, zero, -1
; RV32ZBA-NEXT: slt a0, a1, a0
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssub.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sgtz a2, a1
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: slt a0, a1, a0
; RV64ZBA-NEXT: xor a0, a2, a0
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: usubo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sub a2, a0, a1
; RV32-NEXT: bltu a0, a2, .LBB35_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB35_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: subw a2, a0, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: bltu a3, a2, .LBB35_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB35_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sub a2, a0, a1
; RV32ZBA-NEXT: bltu a0, a2, .LBB35_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB35_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: subw a2, a0, a1
; RV64ZBA-NEXT: sext.w a3, a0
; RV64ZBA-NEXT: bltu a3, a2, .LBB35_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB35_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
define i1 @usubo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: usubo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: subw a1, a0, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: sltu a0, a0, a1
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: subw a1, a0, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: sltu a0, a0, a1
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: usubo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a4, a0, a2
; RV32-NEXT: sub a5, a1, a3
; RV32-NEXT: sub a4, a5, a4
; RV32-NEXT: beq a4, a1, .LBB37_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a4, a1, a4
; RV32-NEXT: beqz a4, .LBB37_3
; RV32-NEXT: j .LBB37_4
; RV32-NEXT: .LBB37_2:
; RV32-NEXT: sub a4, a0, a2
; RV32-NEXT: sltu a4, a0, a4
; RV32-NEXT: bnez a4, .LBB37_4
; RV32-NEXT: .LBB37_3: # %entry
; RV32-NEXT: mv a0, a2
; RV32-NEXT: mv a1, a3
; RV32-NEXT: .LBB37_4: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sub a2, a0, a1
; RV64-NEXT: bltu a0, a2, .LBB37_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB37_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a4, a0, a2
; RV32ZBA-NEXT: sub a5, a1, a3
; RV32ZBA-NEXT: sub a4, a5, a4
; RV32ZBA-NEXT: beq a4, a1, .LBB37_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a4, a1, a4
; RV32ZBA-NEXT: beqz a4, .LBB37_3
; RV32ZBA-NEXT: j .LBB37_4
; RV32ZBA-NEXT: .LBB37_2:
; RV32ZBA-NEXT: sub a4, a0, a2
; RV32ZBA-NEXT: sltu a4, a0, a4
; RV32ZBA-NEXT: bnez a4, .LBB37_4
; RV32ZBA-NEXT: .LBB37_3: # %entry
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: mv a1, a3
; RV32ZBA-NEXT: .LBB37_4: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sub a2, a0, a1
; RV64ZBA-NEXT: bltu a0, a2, .LBB37_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB37_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: usubo.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a4, a0, a2
; RV32-NEXT: sub a3, a1, a3
; RV32-NEXT: sub a3, a3, a4
; RV32-NEXT: beq a3, a1, .LBB38_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a1, a3
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
; RV32-NEXT: .LBB38_2:
; RV32-NEXT: sub a1, a0, a2
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: xori a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a4, a0, a2
; RV32ZBA-NEXT: sub a3, a1, a3
; RV32ZBA-NEXT: sub a3, a3, a4
; RV32ZBA-NEXT: beq a3, a1, .LBB38_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a1, a3
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB38_2:
; RV32ZBA-NEXT: sub a1, a0, a2
; RV32ZBA-NEXT: sltu a0, a0, a1
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: sltu a0, a0, a1
; RV64ZBA-NEXT: xori a0, a0, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: smulo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulh a2, a0, a1
; RV32-NEXT: mul a3, a0, a1
; RV32-NEXT: srai a3, a3, 31
; RV32-NEXT: bne a2, a3, .LBB39_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB39_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a2, a1
; RV64-NEXT: sext.w a3, a0
; RV64-NEXT: mul a4, a3, a2
; RV64-NEXT: mulw a2, a3, a2
; RV64-NEXT: bne a2, a4, .LBB39_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB39_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulh a2, a0, a1
; RV32ZBA-NEXT: mul a3, a0, a1
; RV32ZBA-NEXT: srai a3, a3, 31
; RV32ZBA-NEXT: bne a2, a3, .LBB39_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB39_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a2, a1
; RV64ZBA-NEXT: sext.w a3, a0
; RV64ZBA-NEXT: mul a4, a3, a2
; RV64ZBA-NEXT: mulw a2, a3, a2
; RV64ZBA-NEXT: bne a2, a4, .LBB39_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB39_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
define i1 @smulo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: smulo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulh a2, a0, a1
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: xor a0, a2, a0
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a2, a0, a1
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: xor a0, a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulh a2, a0, a1
; RV32ZBA-NEXT: mul a0, a0, a1
; RV32ZBA-NEXT: srai a0, a0, 31
; RV32ZBA-NEXT: xor a0, a2, a0
; RV32ZBA-NEXT: seqz a0, a0
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: mul a2, a0, a1
; RV64ZBA-NEXT: mulw a0, a0, a1
; RV64ZBA-NEXT: xor a0, a0, a2
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
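; On RV32, i64 smulo is lowered to a __mulodi4 libcall that reports overflow
; through a stack slot; RV64 uses mulh/mul directly.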
define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: smulo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -12
; RV32-NEXT: .cfi_offset s2, -16
; RV32-NEXT: .cfi_offset s3, -20
; RV32-NEXT: mv s2, a3
; RV32-NEXT: mv s3, a2
; RV32-NEXT: mv s0, a1
; RV32-NEXT: mv s1, a0
; RV32-NEXT: sw zero, 8(sp)
; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: bnez a0, .LBB41_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv s1, s3
; RV32-NEXT: mv s0, s2
; RV32-NEXT: .LBB41_2: # %entry
; RV32-NEXT: mv a0, s1
; RV32-NEXT: mv a1, s0
; RV32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulh a2, a0, a1
; RV64-NEXT: mul a3, a0, a1
; RV64-NEXT: srai a3, a3, 63
; RV64-NEXT: bne a2, a3, .LBB41_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB41_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -32
; RV32ZBA-NEXT: .cfi_def_cfa_offset 32
; RV32ZBA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: .cfi_offset s0, -8
; RV32ZBA-NEXT: .cfi_offset s1, -12
; RV32ZBA-NEXT: .cfi_offset s2, -16
; RV32ZBA-NEXT: .cfi_offset s3, -20
; RV32ZBA-NEXT: mv s2, a3
; RV32ZBA-NEXT: mv s3, a2
; RV32ZBA-NEXT: mv s0, a1
; RV32ZBA-NEXT: mv s1, a0
; RV32ZBA-NEXT: sw zero, 8(sp)
; RV32ZBA-NEXT: addi a4, sp, 8
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a0, 8(sp)
; RV32ZBA-NEXT: bnez a0, .LBB41_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv s1, s3
; RV32ZBA-NEXT: mv s0, s2
; RV32ZBA-NEXT: .LBB41_2: # %entry
; RV32ZBA-NEXT: mv a0, s1
; RV32ZBA-NEXT: mv a1, s0
; RV32ZBA-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 32
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulh a2, a0, a1
; RV64ZBA-NEXT: mul a3, a0, a1
; RV64ZBA-NEXT: srai a3, a3, 63
; RV64ZBA-NEXT: bne a2, a3, .LBB41_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB41_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
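; As above, but the overflow flag (the libcall result on RV32) is inverted
; with seqz.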
define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: smulo.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: sw zero, 8(sp)
; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulh a2, a0, a1
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: xor a0, a2, a0
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -16
; RV32ZBA-NEXT: .cfi_def_cfa_offset 16
; RV32ZBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: sw zero, 8(sp)
; RV32ZBA-NEXT: addi a4, sp, 8
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a0, 8(sp)
; RV32ZBA-NEXT: seqz a0, a0
; RV32ZBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 16
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulh a2, a0, a1
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: srai a0, a0, 63
; RV64ZBA-NEXT: xor a0, a2, a0
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
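; i32 umulo overflows iff the high half of the product (mulhu) is nonzero.
; RV64 widens the operands with slli 32 (zext.w under Zba) and tests the
; upper 32 bits of the 64-bit product.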
define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: umulo.select.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulhu a2, a0, a1
; RV32-NEXT: bnez a2, .LBB43_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB43_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.select.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: slli a2, a1, 32
; RV64-NEXT: slli a3, a0, 32
; RV64-NEXT: mulhu a2, a3, a2
; RV64-NEXT: srli a2, a2, 32
; RV64-NEXT: bnez a2, .LBB43_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB43_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.select.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulhu a2, a0, a1
; RV32ZBA-NEXT: bnez a2, .LBB43_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a1
; RV32ZBA-NEXT: .LBB43_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.select.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: zext.w a2, a1
; RV64ZBA-NEXT: zext.w a3, a0
; RV64ZBA-NEXT: mul a2, a3, a2
; RV64ZBA-NEXT: srli a2, a2, 32
; RV64ZBA-NEXT: bnez a2, .LBB43_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB43_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
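; Inverted form: seqz on the high half of the product.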
define i1 @umulo.not.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: umulo.not.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulhu a0, a0, a1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.not.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: slli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: mulhu a0, a0, a1
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.not.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulhu a0, a0, a1
; RV32ZBA-NEXT: seqz a0, a0
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.not.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: zext.w a1, a1
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: srli a0, a0, 32
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
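; RV32 expands i64 umulo inline from 32-bit partial products (mul/mulhu plus
; carry and nonzero checks); RV64 only needs mulhu.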
define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: umulo.select.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mul a4, a3, a0
; RV32-NEXT: mul a5, a1, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: mulhu a5, a0, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: sltu a6, a4, a5
; RV32-NEXT: snez a5, a3
; RV32-NEXT: snez a4, a1
; RV32-NEXT: and a4, a4, a5
; RV32-NEXT: mulhu a5, a1, a2
; RV32-NEXT: snez a5, a5
; RV32-NEXT: or a4, a4, a5
; RV32-NEXT: mulhu a5, a3, a0
; RV32-NEXT: snez a5, a5
; RV32-NEXT: or a4, a4, a5
; RV32-NEXT: or a4, a4, a6
; RV32-NEXT: bnez a4, .LBB45_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a2
; RV32-NEXT: mv a1, a3
; RV32-NEXT: .LBB45_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.select.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulhu a2, a0, a1
; RV64-NEXT: bnez a2, .LBB45_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB45_2: # %entry
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.select.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mul a4, a3, a0
; RV32ZBA-NEXT: mul a5, a1, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: mulhu a5, a0, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: sltu a6, a4, a5
; RV32ZBA-NEXT: snez a5, a3
; RV32ZBA-NEXT: snez a4, a1
; RV32ZBA-NEXT: and a4, a4, a5
; RV32ZBA-NEXT: mulhu a5, a1, a2
; RV32ZBA-NEXT: snez a5, a5
; RV32ZBA-NEXT: or a4, a4, a5
; RV32ZBA-NEXT: mulhu a5, a3, a0
; RV32ZBA-NEXT: snez a5, a5
; RV32ZBA-NEXT: or a4, a4, a5
; RV32ZBA-NEXT: or a4, a4, a6
; RV32ZBA-NEXT: bnez a4, .LBB45_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: mv a0, a2
; RV32ZBA-NEXT: mv a1, a3
; RV32ZBA-NEXT: .LBB45_2: # %entry
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.select.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulhu a2, a0, a1
; RV64ZBA-NEXT: bnez a2, .LBB45_2
; RV64ZBA-NEXT: # %bb.1: # %entry
; RV64ZBA-NEXT: mv a0, a1
; RV64ZBA-NEXT: .LBB45_2: # %entry
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
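; Inverted form of the same expansion, ending in xori 1.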
define i1 @umulo.not.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: umulo.not.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mul a4, a3, a0
; RV32-NEXT: mul a5, a1, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: mulhu a5, a0, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: sltu a6, a4, a5
; RV32-NEXT: snez a5, a3
; RV32-NEXT: snez a4, a1
; RV32-NEXT: and a4, a4, a5
; RV32-NEXT: mulhu a1, a1, a2
; RV32-NEXT: snez a1, a1
; RV32-NEXT: or a1, a4, a1
; RV32-NEXT: mulhu a0, a3, a0
; RV32-NEXT: snez a0, a0
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: or a0, a0, a6
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.not.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulhu a0, a0, a1
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.not.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mul a4, a3, a0
; RV32ZBA-NEXT: mul a5, a1, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: mulhu a5, a0, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: sltu a6, a4, a5
; RV32ZBA-NEXT: snez a5, a3
; RV32ZBA-NEXT: snez a4, a1
; RV32ZBA-NEXT: and a4, a4, a5
; RV32ZBA-NEXT: mulhu a1, a1, a2
; RV32ZBA-NEXT: snez a1, a1
; RV32ZBA-NEXT: or a1, a4, a1
; RV32ZBA-NEXT: mulhu a0, a3, a0
; RV32ZBA-NEXT: snez a0, a0
; RV32ZBA-NEXT: or a0, a1, a0
; RV32ZBA-NEXT: or a0, a0, a6
; RV32ZBA-NEXT: xori a0, a0, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.not.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulhu a0, a0, a1
; RV64ZBA-NEXT: seqz a0, a0
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = xor i1 %obit, true
ret i1 %ret
}
;
; Check the use of the overflow bit in combination with a branch instruction.
;
define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: saddo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a2, a0, a1
; RV32-NEXT: slt a0, a2, a0
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: beq a1, a0, .LBB47_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB47_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: beq a0, a2, .LBB47_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB47_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a2, a0, a1
; RV32ZBA-NEXT: slt a0, a2, a0
; RV32ZBA-NEXT: slti a1, a1, 0
; RV32ZBA-NEXT: beq a1, a0, .LBB47_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB47_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: addw a0, a0, a1
; RV64ZBA-NEXT: beq a0, a2, .LBB47_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB47_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
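; On RV32 the i64 signed-add overflow test branches on the sign of
; (~(lhs.hi ^ rhs.hi) & (lhs.hi ^ sum.hi)) via bgez.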
define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: saddo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a4, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a0, a4, a0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: not a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: bgez a0, .LBB48_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB48_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: saddo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a2, a0, a1
; RV64-NEXT: slt a0, a2, a0
; RV64-NEXT: slti a1, a1, 0
; RV64-NEXT: beq a1, a0, .LBB48_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB48_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: saddo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a4, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a0, a4, a0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: not a1, a1
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: bgez a0, .LBB48_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB48_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: saddo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a2, a0, a1
; RV64ZBA-NEXT: slt a0, a2, a0
; RV64ZBA-NEXT: slti a1, a1, 0
; RV64ZBA-NEXT: beq a1, a0, .LBB48_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB48_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
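; Unsigned-add overflow folds straight into the branch: the carry is set iff
; sum < lhs, so bgeu sum, lhs jumps to the no-overflow path.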
define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: uaddo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a1, a0, a1
; RV32-NEXT: bgeu a1, a0, .LBB49_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB49_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addw a1, a0, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: bgeu a1, a0, .LBB49_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB49_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a1, a0, a1
; RV32ZBA-NEXT: bgeu a1, a0, .LBB49_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB49_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addw a1, a0, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: bgeu a1, a0, .LBB49_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB49_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
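; RV32 compares the carry-adjusted high sum against lhs.hi; when they are
; equal the low-word carry decides. RV64 again needs only bgeu.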
define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: uaddo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a3, a1, a3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a2, a3, a0
; RV32-NEXT: beq a2, a1, .LBB50_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: .LBB50_2: # %entry
; RV32-NEXT: beqz a0, .LBB50_4
; RV32-NEXT: # %bb.3: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB50_4: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a1, a0, a1
; RV64-NEXT: bgeu a1, a0, .LBB50_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB50_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: uaddo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a3, a1, a3
; RV32ZBA-NEXT: add a2, a0, a2
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a2, a3, a0
; RV32ZBA-NEXT: beq a2, a1, .LBB50_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a2, a1
; RV32ZBA-NEXT: .LBB50_2: # %entry
; RV32ZBA-NEXT: beqz a0, .LBB50_4
; RV32ZBA-NEXT: # %bb.3: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB50_4: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: uaddo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a1, a0, a1
; RV64ZBA-NEXT: bgeu a1, a0, .LBB50_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB50_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
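; Signed-sub overflow: branch on (rhs > 0) != (diff < lhs), i.e. sgtz vs. slt.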
define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: ssubo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sgtz a2, a1
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: slt a0, a1, a0
; RV32-NEXT: beq a2, a0, .LBB51_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB51_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: sub a2, a0, a1
; RV64-NEXT: subw a0, a0, a1
; RV64-NEXT: beq a0, a2, .LBB51_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB51_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sgtz a2, a1
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: slt a0, a1, a0
; RV32ZBA-NEXT: beq a2, a0, .LBB51_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB51_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: sub a2, a0, a1
; RV64ZBA-NEXT: subw a0, a0, a1
; RV64ZBA-NEXT: beq a0, a2, .LBB51_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB51_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
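; RV32 branches on the sign of ((lhs.hi ^ rhs.hi) & (lhs.hi ^ diff.hi)).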
define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: ssubo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: sub a2, a1, a3
; RV32-NEXT: sub a0, a2, a0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: xor a1, a1, a3
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: bgez a0, .LBB52_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB52_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: ssubo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sgtz a2, a1
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: slt a0, a1, a0
; RV64-NEXT: beq a2, a0, .LBB52_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB52_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: ssubo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a0, a0, a2
; RV32ZBA-NEXT: sub a2, a1, a3
; RV32ZBA-NEXT: sub a0, a2, a0
; RV32ZBA-NEXT: xor a0, a1, a0
; RV32ZBA-NEXT: xor a1, a1, a3
; RV32ZBA-NEXT: and a0, a1, a0
; RV32ZBA-NEXT: bgez a0, .LBB52_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB52_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: ssubo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sgtz a2, a1
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: slt a0, a1, a0
; RV64ZBA-NEXT: beq a2, a0, .LBB52_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB52_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
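; Unsigned-sub overflow (borrow) is just bgeu lhs, diff.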
define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: usubo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sub a1, a0, a1
; RV32-NEXT: bgeu a0, a1, .LBB53_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB53_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: subw a1, a0, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: bgeu a0, a1, .LBB53_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB53_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sub a1, a0, a1
; RV32ZBA-NEXT: bgeu a0, a1, .LBB53_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB53_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: subw a1, a0, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: bgeu a0, a1, .LBB53_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB53_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
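; RV32 checks diff.hi against lhs.hi and falls back to the low words when
; the high words are equal.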
define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: usubo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: sltu a4, a0, a2
; RV32-NEXT: sub a3, a1, a3
; RV32-NEXT: sub a3, a3, a4
; RV32-NEXT: beq a3, a1, .LBB54_3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a1, a3
; RV32-NEXT: bnez a0, .LBB54_4
; RV32-NEXT: .LBB54_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
; RV32-NEXT: .LBB54_3:
; RV32-NEXT: sub a1, a0, a2
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: beqz a0, .LBB54_2
; RV32-NEXT: .LBB54_4: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
;
; RV64-LABEL: usubo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sub a1, a0, a1
; RV64-NEXT: bgeu a0, a1, .LBB54_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB54_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: usubo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: sltu a4, a0, a2
; RV32ZBA-NEXT: sub a3, a1, a3
; RV32ZBA-NEXT: sub a3, a3, a4
; RV32ZBA-NEXT: beq a3, a1, .LBB54_3
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a1, a3
; RV32ZBA-NEXT: bnez a0, .LBB54_4
; RV32ZBA-NEXT: .LBB54_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB54_3:
; RV32ZBA-NEXT: sub a1, a0, a2
; RV32ZBA-NEXT: sltu a0, a0, a1
; RV32ZBA-NEXT: beqz a0, .LBB54_2
; RV32ZBA-NEXT: .LBB54_4: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: usubo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sub a1, a0, a1
; RV64ZBA-NEXT: bgeu a0, a1, .LBB54_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB54_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
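; Same mulh vs. sign-of-low-product check as above, now feeding beq.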
define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: smulo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulh a2, a0, a1
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: beq a2, a0, .LBB55_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB55_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: mul a2, a0, a1
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: beq a0, a2, .LBB55_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB55_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulh a2, a0, a1
; RV32ZBA-NEXT: mul a0, a0, a1
; RV32ZBA-NEXT: srai a0, a0, 31
; RV32ZBA-NEXT: beq a2, a0, .LBB55_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB55_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: sext.w a1, a1
; RV64ZBA-NEXT: sext.w a0, a0
; RV64ZBA-NEXT: mul a2, a0, a1
; RV64ZBA-NEXT: mulw a0, a0, a1
; RV64ZBA-NEXT: beq a0, a2, .LBB55_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB55_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
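; RV32 calls __mulodi4 and branches on the overflow flag loaded back from
; the stack slot.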
define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: smulo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: sw zero, 8(sp)
; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: beqz a0, .LBB56_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: j .LBB56_3
; RV32-NEXT: .LBB56_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: .LBB56_3: # %overflow
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: smulo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulh a2, a0, a1
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: beq a2, a0, .LBB56_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB56_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -16
; RV32ZBA-NEXT: .cfi_def_cfa_offset 16
; RV32ZBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: sw zero, 8(sp)
; RV32ZBA-NEXT: addi a4, sp, 8
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a0, 8(sp)
; RV32ZBA-NEXT: beqz a0, .LBB56_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: j .LBB56_3
; RV32ZBA-NEXT: .LBB56_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: .LBB56_3: # %overflow
; RV32ZBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 16
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulh a2, a0, a1
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: srai a0, a0, 63
; RV64ZBA-NEXT: beq a2, a0, .LBB56_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB56_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
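; Constant multiplicand: RV32 materializes -13 as the i64 pair a2/a3 for the
; libcall; RV64 materializes it in a register and multiplies directly.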
define zeroext i1 @smulo2.br.i64(i64 %v1) {
; RV32-LABEL: smulo2.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: sw zero, 8(sp)
; RV32-NEXT: addi a2, zero, -13
; RV32-NEXT: addi a3, zero, -1
; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: call __mulodi4@plt
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: beqz a0, .LBB57_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: j .LBB57_3
; RV32-NEXT: .LBB57_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: .LBB57_3: # %overflow
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: smulo2.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a1, zero, -13
; RV64-NEXT: mulh a2, a0, a1
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: beq a2, a0, .LBB57_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB57_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: smulo2.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: addi sp, sp, -16
; RV32ZBA-NEXT: .cfi_def_cfa_offset 16
; RV32ZBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT: .cfi_offset ra, -4
; RV32ZBA-NEXT: sw zero, 8(sp)
; RV32ZBA-NEXT: addi a2, zero, -13
; RV32ZBA-NEXT: addi a3, zero, -1
; RV32ZBA-NEXT: addi a4, sp, 8
; RV32ZBA-NEXT: call __mulodi4@plt
; RV32ZBA-NEXT: lw a0, 8(sp)
; RV32ZBA-NEXT: beqz a0, .LBB57_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: j .LBB57_3
; RV32ZBA-NEXT: .LBB57_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: .LBB57_3: # %overflow
; RV32ZBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: addi sp, sp, 16
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: smulo2.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: addi a1, zero, -13
; RV64ZBA-NEXT: mulh a2, a0, a1
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: srai a0, a0, 63
; RV64ZBA-NEXT: beq a2, a0, .LBB57_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB57_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
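; Branch form of the mulhu-high-half check.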
define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
; RV32-LABEL: umulo.br.i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mulhu a0, a0, a1
; RV32-NEXT: beqz a0, .LBB58_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB58_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.br.i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: slli a1, a1, 32
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: mulhu a0, a0, a1
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: beqz a0, .LBB58_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB58_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.br.i32:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mulhu a0, a0, a1
; RV32ZBA-NEXT: beqz a0, .LBB58_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB58_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.br.i32:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: zext.w a1, a1
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: mul a0, a0, a1
; RV64ZBA-NEXT: srli a0, a0, 32
; RV64ZBA-NEXT: beqz a0, .LBB58_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB58_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
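; Branch form of the inline RV32 i64 umulo expansion above.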
define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
; RV32-LABEL: umulo.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mul a4, a3, a0
; RV32-NEXT: mul a5, a1, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: mulhu a5, a0, a2
; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: sltu a6, a4, a5
; RV32-NEXT: snez a5, a3
; RV32-NEXT: snez a4, a1
; RV32-NEXT: and a4, a4, a5
; RV32-NEXT: mulhu a1, a1, a2
; RV32-NEXT: snez a1, a1
; RV32-NEXT: or a1, a4, a1
; RV32-NEXT: mulhu a0, a3, a0
; RV32-NEXT: snez a0, a0
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: or a0, a0, a6
; RV32-NEXT: beqz a0, .LBB59_2
; RV32-NEXT: # %bb.1: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB59_2: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: mulhu a0, a0, a1
; RV64-NEXT: beqz a0, .LBB59_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB59_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mul a4, a3, a0
; RV32ZBA-NEXT: mul a5, a1, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: mulhu a5, a0, a2
; RV32ZBA-NEXT: add a4, a5, a4
; RV32ZBA-NEXT: sltu a6, a4, a5
; RV32ZBA-NEXT: snez a5, a3
; RV32ZBA-NEXT: snez a4, a1
; RV32ZBA-NEXT: and a4, a4, a5
; RV32ZBA-NEXT: mulhu a1, a1, a2
; RV32ZBA-NEXT: snez a1, a1
; RV32ZBA-NEXT: or a1, a4, a1
; RV32ZBA-NEXT: mulhu a0, a3, a0
; RV32ZBA-NEXT: snez a0, a0
; RV32ZBA-NEXT: or a0, a1, a0
; RV32ZBA-NEXT: or a0, a0, a6
; RV32ZBA-NEXT: beqz a0, .LBB59_2
; RV32ZBA-NEXT: # %bb.1: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB59_2: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: mulhu a0, a0, a1
; RV64ZBA-NEXT: beqz a0, .LBB59_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB59_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
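; Multiplying by 2 lowers to an add, so the overflow check becomes a carry
; check; RV32 also propagates the carry into the doubled high word.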
define zeroext i1 @umulo2.br.i64(i64 %v1) {
; RV32-LABEL: umulo2.br.i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: add a2, a0, a0
; RV32-NEXT: sltu a0, a2, a0
; RV32-NEXT: add a2, a1, a1
; RV32-NEXT: add a2, a2, a0
; RV32-NEXT: beq a2, a1, .LBB60_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: .LBB60_2: # %entry
; RV32-NEXT: beqz a0, .LBB60_4
; RV32-NEXT: # %bb.3: # %overflow
; RV32-NEXT: mv a0, zero
; RV32-NEXT: ret
; RV32-NEXT: .LBB60_4: # %continue
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: ret
;
; RV64-LABEL: umulo2.br.i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a1, a0, a0
; RV64-NEXT: bgeu a1, a0, .LBB60_2
; RV64-NEXT: # %bb.1: # %overflow
; RV64-NEXT: mv a0, zero
; RV64-NEXT: ret
; RV64-NEXT: .LBB60_2: # %continue
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: ret
;
; RV32ZBA-LABEL: umulo2.br.i64:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: add a2, a0, a0
; RV32ZBA-NEXT: sltu a0, a2, a0
; RV32ZBA-NEXT: add a2, a1, a1
; RV32ZBA-NEXT: add a2, a2, a0
; RV32ZBA-NEXT: beq a2, a1, .LBB60_2
; RV32ZBA-NEXT: # %bb.1: # %entry
; RV32ZBA-NEXT: sltu a0, a2, a1
; RV32ZBA-NEXT: .LBB60_2: # %entry
; RV32ZBA-NEXT: beqz a0, .LBB60_4
; RV32ZBA-NEXT: # %bb.3: # %overflow
; RV32ZBA-NEXT: mv a0, zero
; RV32ZBA-NEXT: ret
; RV32ZBA-NEXT: .LBB60_4: # %continue
; RV32ZBA-NEXT: addi a0, zero, 1
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: umulo2.br.i64:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: add a1, a0, a0
; RV64ZBA-NEXT: bgeu a1, a0, .LBB60_2
; RV64ZBA-NEXT: # %bb.1: # %overflow
; RV64ZBA-NEXT: mv a0, zero
; RV64ZBA-NEXT: ret
; RV64ZBA-NEXT: .LBB60_2: # %continue
; RV64ZBA-NEXT: addi a0, zero, 1
; RV64ZBA-NEXT: ret
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
br i1 %obit, label %overflow, label %continue
overflow:
ret i1 false
continue:
ret i1 true
}
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone