; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64ZBB

declare i32 @llvm.abs.i32(i32, i1 immarg)
declare i64 @llvm.abs.i64(i64, i1 immarg)
declare i32 @llvm.smax.i32(i32, i32)
declare i64 @llvm.smax.i64(i64, i64)
declare i32 @llvm.umax.i32(i32, i32)
declare i64 @llvm.umax.i64(i64, i64)
declare i32 @llvm.smin.i32(i32, i32)
declare i64 @llvm.smin.i64(i64, i64)
declare i32 @llvm.umin.i32(i32, i32)
declare i64 @llvm.umin.i64(i64, i64)
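
; -abs(x) is lowered via srai/xor/sub on plain RV32I/RV64I; with Zbb it
; should fold to a single min(x, -x).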
define i32 @neg_abs32(i32 %x) {
; RV32I-LABEL: neg_abs32:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: neg_abs32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: min a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: neg_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: neg_abs32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sraiw a1, a0, 31
; RV64ZBB-NEXT: xor a0, a0, a1
; RV64ZBB-NEXT: subw a0, a1, a0
; RV64ZBB-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
%neg = sub nsw i32 0, %abs
ret i32 %neg
}
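
; The select form (x < 0 ? x : 0 - x) should be recognized as the same
; neg-abs pattern and lower identically.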
define i32 @select_neg_abs32(i32 %x) {
; RV32I-LABEL: select_neg_abs32:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: select_neg_abs32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: min a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: select_neg_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: select_neg_abs32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sraiw a1, a0, 31
; RV64ZBB-NEXT: xor a0, a0, a1
; RV64ZBB-NEXT: subw a0, a1, a0
; RV64ZBB-NEXT: ret
%1 = icmp slt i32 %x, 0
%2 = sub nsw i32 0, %x
%3 = select i1 %1, i32 %x, i32 %2
ret i32 %3
}
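
; 64-bit neg-abs: neg+min on RV64 Zbb; RV32 expands the negation across the
; register pair, using sltu to produce the borrow into the high half.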
define i64 @neg_abs64(i64 %x) {
; RV32I-LABEL: neg_abs64:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a2, a1, 31
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: sltu a3, a2, a0
; RV32I-NEXT: sub a1, a2, a1
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: neg_abs64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: srai a2, a1, 31
; RV32ZBB-NEXT: xor a0, a0, a2
; RV32ZBB-NEXT: xor a1, a1, a2
; RV32ZBB-NEXT: sltu a3, a2, a0
; RV32ZBB-NEXT: sub a1, a2, a1
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: neg_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: neg_abs64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
%neg = sub nsw i64 0, %abs
ret i64 %neg
}
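
; The 64-bit select form should lower the same way as neg_abs64.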
define i64 @select_neg_abs64(i64 %x) {
; RV32I-LABEL: select_neg_abs64:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a2, a1, 31
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: sltu a3, a2, a0
; RV32I-NEXT: sub a1, a2, a1
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: select_neg_abs64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: srai a2, a1, 31
; RV32ZBB-NEXT: xor a0, a0, a2
; RV32ZBB-NEXT: xor a1, a1, a2
; RV32ZBB-NEXT: sltu a3, a2, a0
; RV32ZBB-NEXT: sub a1, a2, a1
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: select_neg_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: select_neg_abs64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: ret
%1 = icmp slt i64 %x, 0
%2 = sub nsw i64 0, %x
%3 = select i1 %1, i64 %x, i64 %2
ret i64 %3
}
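
; When abs(x) has a second use (the store to %y), abs is materialized with
; max (or srai/xor/sub) and negated separately instead of folding to min.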
define i32 @neg_abs32_multiuse(i32 %x, ptr %y) {
; RV32I-LABEL: neg_abs32_multiuse:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a2, a0, 31
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: sub a2, a0, a2
; RV32I-NEXT: neg a0, a2
; RV32I-NEXT: sw a2, 0(a1)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: neg_abs32_multiuse:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a2, a0
; RV32ZBB-NEXT: max a2, a0, a2
; RV32ZBB-NEXT: neg a0, a2
; RV32ZBB-NEXT: sw a2, 0(a1)
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: neg_abs32_multiuse:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a2, a0, 31
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: subw a2, a0, a2
; RV64I-NEXT: negw a0, a2
; RV64I-NEXT: sw a2, 0(a1)
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: neg_abs32_multiuse:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: negw a2, a0
; RV64ZBB-NEXT: max a2, a0, a2
; RV64ZBB-NEXT: negw a0, a2
; RV64ZBB-NEXT: sw a2, 0(a1)
; RV64ZBB-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
store i32 %abs, ptr %y
%neg = sub nsw i32 0, %abs
ret i32 %neg
}
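
; 64-bit multiuse case: RV32 computes abs with a branch, then negates both
; halves; RV64 keeps abs live in a register for the store.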
define i64 @neg_abs64_multiuse(i64 %x, ptr %y) {
; RV32I-LABEL: neg_abs64_multiuse:
; RV32I: # %bb.0:
; RV32I-NEXT: bgez a1, .LBB5_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a3, a0
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: .LBB5_2:
; RV32I-NEXT: snez a3, a0
; RV32I-NEXT: neg a4, a1
; RV32I-NEXT: sub a3, a4, a3
; RV32I-NEXT: neg a4, a0
; RV32I-NEXT: sw a0, 0(a2)
; RV32I-NEXT: sw a1, 4(a2)
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: neg_abs64_multiuse:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: bgez a1, .LBB5_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a3, a0
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: neg a0, a0
; RV32ZBB-NEXT: .LBB5_2:
; RV32ZBB-NEXT: snez a3, a0
; RV32ZBB-NEXT: neg a4, a1
; RV32ZBB-NEXT: sub a3, a4, a3
; RV32ZBB-NEXT: neg a4, a0
; RV32ZBB-NEXT: sw a0, 0(a2)
; RV32ZBB-NEXT: sw a1, 4(a2)
; RV32ZBB-NEXT: mv a0, a4
; RV32ZBB-NEXT: mv a1, a3
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: neg_abs64_multiuse:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a2, a0, 63
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: sub a2, a0, a2
; RV64I-NEXT: neg a0, a2
; RV64I-NEXT: sd a2, 0(a1)
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: neg_abs64_multiuse:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a2, a0
; RV64ZBB-NEXT: max a2, a0, a2
; RV64ZBB-NEXT: neg a0, a2
; RV64ZBB-NEXT: sd a2, 0(a1)
; RV64ZBB-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
store i64 %abs, ptr %y
%neg = sub nsw i64 0, %abs
ret i64 %neg
}
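
; -(smax(-x, x)) is a manually expanded -abs(x); with Zbb it should still
; fold to min(x, -x).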
define i32 @expanded_neg_abs32(i32 %x) {
; RV32I-LABEL: expanded_neg_abs32:
; RV32I: # %bb.0:
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: blt a0, a1, .LBB6_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB6_2:
; RV32I-NEXT: neg a0, a1
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_abs32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: min a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: blt a1, a0, .LBB6_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB6_2:
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a0
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.smax.i32(i32 %n, i32 %x)
%r = sub i32 0, %t
ret i32 %r
}
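
; Unsigned variant: -(umax(-x, x)) should fold to minu(x, -x) with Zbb.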
define i32 @expanded_neg_abs32_unsigned(i32 %x) {
; RV32I-LABEL: expanded_neg_abs32_unsigned:
; RV32I: # %bb.0:
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: bltu a0, a1, .LBB7_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB7_2:
; RV32I-NEXT: neg a0, a1
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_abs32_unsigned:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: minu a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_abs32_unsigned:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: bltu a1, a0, .LBB7_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB7_2:
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs32_unsigned:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a0
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: maxu a0, a0, a1
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.umax.i32(i32 %n, i32 %x)
%r = sub i32 0, %t
ret i32 %r
}
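
; 64-bit expanded form: folds to neg+min on RV64 Zbb; on RV32 the smax is
; expanded with branches across the register pair and no fold occurs.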
define i64 @expanded_neg_abs64(i64 %x) {
; RV32I-LABEL: expanded_neg_abs64:
; RV32I: # %bb.0:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a3, a1
; RV32I-NEXT: sub a2, a3, a2
; RV32I-NEXT: neg a3, a0
; RV32I-NEXT: beq a2, a1, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a4, a1, a2
; RV32I-NEXT: beqz a4, .LBB8_3
; RV32I-NEXT: j .LBB8_4
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: sltu a4, a0, a3
; RV32I-NEXT: bnez a4, .LBB8_4
; RV32I-NEXT: .LBB8_3:
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: .LBB8_4:
; RV32I-NEXT: snez a0, a3
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_abs64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a3, a1
; RV32ZBB-NEXT: sub a2, a3, a2
; RV32ZBB-NEXT: neg a3, a0
; RV32ZBB-NEXT: beq a2, a1, .LBB8_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a4, a1, a2
; RV32ZBB-NEXT: beqz a4, .LBB8_3
; RV32ZBB-NEXT: j .LBB8_4
; RV32ZBB-NEXT: .LBB8_2:
; RV32ZBB-NEXT: sltu a4, a0, a3
; RV32ZBB-NEXT: bnez a4, .LBB8_4
; RV32ZBB-NEXT: .LBB8_3:
; RV32ZBB-NEXT: mv a2, a1
; RV32ZBB-NEXT: mv a3, a0
; RV32ZBB-NEXT: .LBB8_4:
; RV32ZBB-NEXT: snez a0, a3
; RV32ZBB-NEXT: add a0, a2, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: neg a0, a3
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: neg a1, a0
; RV64I-NEXT: blt a0, a1, .LBB8_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB8_2:
; RV64I-NEXT: neg a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: ret
%n = sub i64 0, %x
%t = call i64 @llvm.smax.i64(i64 %n, i64 %x)
%r = sub i64 0, %t
ret i64 %r
}
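
; Unsigned 64-bit variant: should fold to minu on RV64 Zbb.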
define i64 @expanded_neg_abs64_unsigned(i64 %x) {
; RV32I-LABEL: expanded_neg_abs64_unsigned:
; RV32I: # %bb.0:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a3, a1
; RV32I-NEXT: sub a2, a3, a2
; RV32I-NEXT: neg a3, a0
; RV32I-NEXT: beq a2, a1, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a4, a1, a2
; RV32I-NEXT: beqz a4, .LBB9_3
; RV32I-NEXT: j .LBB9_4
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: sltu a4, a0, a3
; RV32I-NEXT: bnez a4, .LBB9_4
; RV32I-NEXT: .LBB9_3:
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: .LBB9_4:
; RV32I-NEXT: snez a0, a3
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_abs64_unsigned:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a3, a1
; RV32ZBB-NEXT: sub a2, a3, a2
; RV32ZBB-NEXT: neg a3, a0
; RV32ZBB-NEXT: beq a2, a1, .LBB9_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu a4, a1, a2
; RV32ZBB-NEXT: beqz a4, .LBB9_3
; RV32ZBB-NEXT: j .LBB9_4
; RV32ZBB-NEXT: .LBB9_2:
; RV32ZBB-NEXT: sltu a4, a0, a3
; RV32ZBB-NEXT: bnez a4, .LBB9_4
; RV32ZBB-NEXT: .LBB9_3:
; RV32ZBB-NEXT: mv a2, a1
; RV32ZBB-NEXT: mv a3, a0
; RV32ZBB-NEXT: .LBB9_4:
; RV32ZBB-NEXT: snez a0, a3
; RV32ZBB-NEXT: add a0, a2, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: neg a0, a3
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_abs64_unsigned:
; RV64I: # %bb.0:
; RV64I-NEXT: neg a1, a0
; RV64I-NEXT: bltu a0, a1, .LBB9_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB9_2:
; RV64I-NEXT: neg a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs64_unsigned:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: minu a0, a0, a1
; RV64ZBB-NEXT: ret
%n = sub i64 0, %x
%t = call i64 @llvm.umax.i64(i64 %n, i64 %x)
%r = sub i64 0, %t
ret i64 %r
}
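
; -(smin(-x, x)) is abs(x), so with Zbb it should fold to max(x, -x).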
define i32 @expanded_neg_inv_abs32(i32 %x) {
; RV32I-LABEL: expanded_neg_inv_abs32:
; RV32I: # %bb.0:
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: blt a1, a0, .LBB10_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: neg a0, a1
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_inv_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: blt a0, a1, .LBB10_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a0
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.smin.i32(i32 %n, i32 %x)
%r = sub i32 0, %t
ret i32 %r
}
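
; Unsigned variant: -(umin(-x, x)) should fold to maxu(x, -x) with Zbb.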
define i32 @expanded_neg_inv_abs32_unsigned(i32 %x) {
; RV32I-LABEL: expanded_neg_inv_abs32_unsigned:
; RV32I: # %bb.0:
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: bltu a1, a0, .LBB11_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB11_2:
; RV32I-NEXT: neg a0, a1
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs32_unsigned:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: maxu a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_inv_abs32_unsigned:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: bltu a0, a1, .LBB11_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32_unsigned:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a0
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: minu a0, a0, a1
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.umin.i32(i32 %n, i32 %x)
%r = sub i32 0, %t
ret i32 %r
}
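
; 64-bit -(smin(-x, x)): folds to neg+max on RV64 Zbb; fully expanded on
; RV32.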
define i64 @expanded_neg_inv_abs64(i64 %x) {
; RV32I-LABEL: expanded_neg_inv_abs64:
; RV32I: # %bb.0:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a3, a1
; RV32I-NEXT: sub a2, a3, a2
; RV32I-NEXT: neg a3, a0
; RV32I-NEXT: beq a2, a1, .LBB12_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a4, a2, a1
; RV32I-NEXT: beqz a4, .LBB12_3
; RV32I-NEXT: j .LBB12_4
; RV32I-NEXT: .LBB12_2:
; RV32I-NEXT: sltu a4, a3, a0
; RV32I-NEXT: bnez a4, .LBB12_4
; RV32I-NEXT: .LBB12_3:
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: .LBB12_4:
; RV32I-NEXT: snez a0, a3
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a3, a1
; RV32ZBB-NEXT: sub a2, a3, a2
; RV32ZBB-NEXT: neg a3, a0
; RV32ZBB-NEXT: beq a2, a1, .LBB12_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a4, a2, a1
; RV32ZBB-NEXT: beqz a4, .LBB12_3
; RV32ZBB-NEXT: j .LBB12_4
; RV32ZBB-NEXT: .LBB12_2:
; RV32ZBB-NEXT: sltu a4, a3, a0
; RV32ZBB-NEXT: bnez a4, .LBB12_4
; RV32ZBB-NEXT: .LBB12_3:
; RV32ZBB-NEXT: mv a2, a1
; RV32ZBB-NEXT: mv a3, a0
; RV32ZBB-NEXT: .LBB12_4:
; RV32ZBB-NEXT: snez a0, a3
; RV32ZBB-NEXT: add a0, a2, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: neg a0, a3
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_inv_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: neg a1, a0
; RV64I-NEXT: blt a1, a0, .LBB12_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB12_2:
; RV64I-NEXT: neg a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%n = sub i64 0, %x
%t = call i64 @llvm.smin.i64(i64 %n, i64 %x)
%r = sub i64 0, %t
ret i64 %r
}
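
; Unsigned 64-bit variant: should fold to maxu on RV64 Zbb.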
define i64 @expanded_neg_inv_abs64_unsigned(i64 %x) {
; RV32I-LABEL: expanded_neg_inv_abs64_unsigned:
; RV32I: # %bb.0:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a3, a1
; RV32I-NEXT: sub a2, a3, a2
; RV32I-NEXT: neg a3, a0
; RV32I-NEXT: beq a2, a1, .LBB13_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a4, a2, a1
; RV32I-NEXT: beqz a4, .LBB13_3
; RV32I-NEXT: j .LBB13_4
; RV32I-NEXT: .LBB13_2:
; RV32I-NEXT: sltu a4, a3, a0
; RV32I-NEXT: bnez a4, .LBB13_4
; RV32I-NEXT: .LBB13_3:
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: .LBB13_4:
; RV32I-NEXT: snez a0, a3
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: expanded_neg_inv_abs64_unsigned:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a3, a1
; RV32ZBB-NEXT: sub a2, a3, a2
; RV32ZBB-NEXT: neg a3, a0
; RV32ZBB-NEXT: beq a2, a1, .LBB13_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu a4, a2, a1
; RV32ZBB-NEXT: beqz a4, .LBB13_3
; RV32ZBB-NEXT: j .LBB13_4
; RV32ZBB-NEXT: .LBB13_2:
; RV32ZBB-NEXT: sltu a4, a3, a0
; RV32ZBB-NEXT: bnez a4, .LBB13_4
; RV32ZBB-NEXT: .LBB13_3:
; RV32ZBB-NEXT: mv a2, a1
; RV32ZBB-NEXT: mv a3, a0
; RV32ZBB-NEXT: .LBB13_4:
; RV32ZBB-NEXT: snez a0, a3
; RV32ZBB-NEXT: add a0, a2, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: neg a0, a3
; RV32ZBB-NEXT: ret
;
; RV64I-LABEL: expanded_neg_inv_abs64_unsigned:
; RV64I: # %bb.0:
; RV64I-NEXT: neg a1, a0
; RV64I-NEXT: bltu a1, a0, .LBB13_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB13_2:
; RV64I-NEXT: neg a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs64_unsigned:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: maxu a0, a0, a1
; RV64ZBB-NEXT: ret
%n = sub i64 0, %x
%t = call i64 @llvm.umin.i64(i64 %n, i64 %x)
%r = sub i64 0, %t
ret i64 %r
}