; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=x86_64 | FileCheck %s
; GitHub issue #161036
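; If the subtraction a-b wraps (unsigned b > a), the wrapped value is greater
; than a, so umin(sub(a,b),a) yields a; otherwise it yields a-b. The fold
; should therefore be able to reuse the borrow flag of the subtraction instead
; of emitting a separate compare.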
; Positive test: umin(sub(a,b),a) with scalar types should be folded
define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) {
; CHECK-LABEL: underflow_compare_fold_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: subq %rsi, %rax
; CHECK-NEXT: cmpq %rdi, %rax
; CHECK-NEXT: cmovaeq %rdi, %rax
; CHECK-NEXT: retq
%sub = sub i64 %a, %b
%cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a)
ret i64 %cond
}
; Positive test: umin(a,sub(a,b)) with scalar types should be folded
define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) {
; CHECK-LABEL: underflow_compare_fold_i64_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: subq %rsi, %rax
; CHECK-NEXT: cmpq %rax, %rdi
; CHECK-NEXT: cmovbq %rdi, %rax
; CHECK-NEXT: retq
%sub = sub i64 %a, %b
%cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub)
ret i64 %cond
}
; Positive test: an extra use of the sub result is fine since the subtraction still executes only once
define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) {
; CHECK-LABEL: underflow_compare_fold_i64_multi_use:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: subq %rsi, %rax
; CHECK-NEXT: movq %rax, (%rdx)
; CHECK-NEXT: cmpq %rdi, %rax
; CHECK-NEXT: cmovaeq %rdi, %rax
; CHECK-NEXT: retq
%sub = sub i64 %a, %b
store i64 %sub, ptr addrspace(1) %ptr
%cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
ret i64 %cond
}
; Positive test: i32
define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) {
; CHECK-LABEL: underflow_compare_fold_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: cmpl %edi, %eax
; CHECK-NEXT: cmovael %edi, %eax
; CHECK-NEXT: retq
%sub = sub i32 %a, %b
%cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a)
ret i32 %cond
}
; Positive test: i32, commuted operands
define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) {
; CHECK-LABEL: underflow_compare_fold_i32_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: cmpl %eax, %edi
; CHECK-NEXT: cmovbl %edi, %eax
; CHECK-NEXT: retq
%sub = sub i32 %a, %b
%cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub)
ret i32 %cond
}
; Positive test: i32, extra use of the sub result
define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) {
; CHECK-LABEL: underflow_compare_fold_i32_multi_use:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: movl %eax, (%rdx)
; CHECK-NEXT: cmpl %edi, %eax
; CHECK-NEXT: cmovael %edi, %eax
; CHECK-NEXT: retq
%sub = sub i32 %a, %b
store i32 %sub, ptr addrspace(1) %ptr
%cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a)
ret i32 %cond
}
; Positive test: i16
define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) {
; CHECK-LABEL: underflow_compare_fold_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: cmpw %di, %ax
; CHECK-NEXT: cmovael %edi, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%sub = sub i16 %a, %b
%cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a)
ret i16 %cond
}
; Positive test: i16, commuted operands
define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) {
; CHECK-LABEL: underflow_compare_fold_i16_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: cmpw %ax, %di
; CHECK-NEXT: cmovbl %edi, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%sub = sub i16 %a, %b
%cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub)
ret i16 %cond
}
; Positive test: i16, extra use of the sub result
define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) {
; CHECK-LABEL: underflow_compare_fold_i16_multi_use:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: subl %esi, %eax
; CHECK-NEXT: movw %ax, (%rdx)
; CHECK-NEXT: cmpw %di, %ax
; CHECK-NEXT: cmovael %edi, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%sub = sub i16 %a, %b
store i16 %sub, ptr addrspace(1) %ptr
%cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a)
ret i16 %cond
}
; Negative test: umin(sub(a,b),a) with vector types should not be folded
define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: underflow_compare_dontfold_vectors:
; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm2
; CHECK-NEXT: psubb %xmm1, %xmm2
; CHECK-NEXT: pminub %xmm2, %xmm0
; CHECK-NEXT: retq
%sub = sub <16 x i8> %a, %b
%cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a)
ret <16 x i8> %cond
}
; Negative test: pattern mismatch, umin(add(a,b),a) should not be folded
define i64 @umin_add(i64 %a, i64 %b) {
; CHECK-LABEL: umin_add:
; CHECK: # %bb.0:
; CHECK-NEXT: leaq (%rsi,%rdi), %rax
; CHECK-NEXT: cmpq %rdi, %rax
; CHECK-NEXT: cmovaeq %rdi, %rax
; CHECK-NEXT: retq
%add = add i64 %a, %b
%cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a)
ret i64 %cond
}
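
; Declarations for the umin intrinsics used in the tests above (added for
; completeness; many autogenerated tests omit them).
declare i64 @llvm.umin.i64(i64, i64)
declare i32 @llvm.umin.i32(i32, i32)
declare i16 @llvm.umin.i16(i16, i16)
declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)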