[RISCV] Allow conversion of CC logic to bitwise logic

Indicates in the TargetLowering interface that conversions from CC logic to
bitwise logic are allowed. Adds tests that show the benefit when optimization
opportunities are detected. Also adds tests showing that correct code is
generated even when the optimization does not apply (though opportunities for
other optimizations remain).
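
For illustration (a sketch of the generic combine this hook enables; the
fold itself lives in DAGCombiner, not in this patch), returning true for
scalar integer types lets a pair of setccs joined by and/or be rewritten as
a single comparison of combined operands:

  (and (seteq A, B), (seteq C, D))
    -> (seteq (or (xor A, B), (xor C, D)), 0)

which on RISC-V lowers to the xor/xor/or/seqz sequences checked by the new
tests below.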

Differential Revision: https://reviews.llvm.org/D59596
Patch by Luís Marques.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@356740 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/RISCV/RISCVISelLowering.h b/lib/Target/RISCV/RISCVISelLowering.h
index fc69682..57546a0 100644
--- a/lib/Target/RISCV/RISCVISelLowering.h
+++ b/lib/Target/RISCV/RISCVISelLowering.h
@@ -98,6 +98,10 @@
   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                          EVT VT) const override;
 
+  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
+    return VT.isScalarInteger();
+  }
+
   bool shouldInsertFencesForAtomic(const Instruction *I) const override {
     return isa<LoadInst>(I) || isa<StoreInst>(I);
   }
diff --git a/test/CodeGen/RISCV/setcc-logic.ll b/test/CodeGen/RISCV/setcc-logic.ll
new file mode 100644
index 0000000..2303e55
--- /dev/null
+++ b/test/CodeGen/RISCV/setcc-logic.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+define i1 @and_icmp_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+; RV32I-LABEL: and_icmp_eq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and_icmp_eq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+  %cmp1 = icmp eq i32 %a, %b
+  %cmp2 = icmp eq i32 %c, %d
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @or_icmp_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+; RV32I-LABEL: or_icmp_ne:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a2, a2, a3
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or_icmp_ne:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a2, a2, a3
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ret
+  %cmp1 = icmp ne i32 %a, %b
+  %cmp2 = icmp ne i32 %c, %d
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @or_icmps_const_1bit_diff(i64 %x) nounwind {
+; RV32I-LABEL: or_icmps_const_1bit_diff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, a0, -13
+; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    andi a1, a2, -5
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or_icmps_const_1bit_diff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -13
+; RV64I-NEXT:    andi a0, a0, -5
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+  %a = icmp eq i64 %x, 17
+  %b = icmp eq i64 %x, 13
+  %r = or i1 %a, %b
+  ret i1 %r
+}
+
+define i1 @and_icmps_const_1bit_diff(i32 %x) nounwind {
+; RV32I-LABEL: and_icmps_const_1bit_diff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -44
+; RV32I-NEXT:    andi a0, a0, -17
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and_icmps_const_1bit_diff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -44
+; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    addi a1, a1, -17
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ret
+  %a = icmp ne i32 %x, 44
+  %b = icmp ne i32 %x, 60
+  %r = and i1 %a, %b
+  ret i1 %r
+}
+
+define i1 @and_icmps_const_not1bit_diff(i32 %x) nounwind {
+; RV32I-LABEL: and_icmps_const_not1bit_diff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a1, zero, 44
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    addi a2, zero, 92
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and_icmps_const_not1bit_diff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    addi a1, zero, 44
+; RV64I-NEXT:    xor a1, a0, a1
+; RV64I-NEXT:    addi a2, zero, 92
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    snez a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+  %a = icmp ne i32 %x, 44
+  %b = icmp ne i32 %x, 92
+  %r = and i1 %a, %b
+  ret i1 %r
+}