| //===- InstructionSimplify.cpp - Fold instruction operands ----------------===// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file implements routines for folding instructions into simpler forms |
| // that do not require creating new instructions. This does constant folding |
| // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either |
| // returning a constant ("and i32 %x, 0" -> "0") or an already existing value |
| // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been |
| // simplified: this is usually true and assuming it simplifies the logic (if |
| // they have not been simplified then the results are correct but possibly |
| // suboptimal). |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "llvm/Analysis/InstructionSimplify.h" |
| |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SetVector.h" |
| #include "llvm/ADT/SmallPtrSet.h" |
| #include "llvm/ADT/Statistic.h" |
| #include "llvm/Analysis/AliasAnalysis.h" |
| #include "llvm/Analysis/AssumptionCache.h" |
| #include "llvm/Analysis/CaptureTracking.h" |
| #include "llvm/Analysis/CmpInstAnalysis.h" |
| #include "llvm/Analysis/ConstantFolding.h" |
| #include "llvm/Analysis/LoopAnalysisManager.h" |
| #include "llvm/Analysis/MemoryBuiltins.h" |
| #include "llvm/Analysis/OverflowInstAnalysis.h" |
| #include "llvm/Analysis/ValueTracking.h" |
| #include "llvm/Analysis/VectorUtils.h" |
| #include "llvm/IR/ConstantRange.h" |
| #include "llvm/IR/DataLayout.h" |
| #include "llvm/IR/Dominators.h" |
| #include "llvm/IR/GetElementPtrTypeIterator.h" |
| #include "llvm/IR/GlobalAlias.h" |
| #include "llvm/IR/InstrTypes.h" |
| #include "llvm/IR/Instructions.h" |
| #include "llvm/IR/Operator.h" |
| #include "llvm/IR/PatternMatch.h" |
| #include "llvm/IR/ValueHandle.h" |
| #include "llvm/Support/KnownBits.h" |
| #include <algorithm> |
| using namespace llvm; |
| using namespace llvm::PatternMatch; |
| |
| #define DEBUG_TYPE "instsimplify" |
| |
| enum { RecursionLimit = 3 }; |
| |
| STATISTIC(NumExpand, "Number of expansions"); |
| STATISTIC(NumReassoc, "Number of reassociations"); |
| |
| static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned); |
| static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned); |
| static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, |
| const SimplifyQuery &, unsigned); |
| static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, |
| unsigned); |
| static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &, |
| const SimplifyQuery &, unsigned); |
| static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &, |
| unsigned); |
| static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
| const SimplifyQuery &Q, unsigned MaxRecurse); |
| static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned); |
| static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned); |
| static Value *SimplifyCastInst(unsigned, Value *, Type *, |
| const SimplifyQuery &, unsigned); |
| static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, bool, |
| const SimplifyQuery &, unsigned); |
| static Value *SimplifySelectInst(Value *, Value *, Value *, |
| const SimplifyQuery &, unsigned); |
| |
| static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal, |
| Value *FalseVal) { |
| BinaryOperator::BinaryOps BinOpCode; |
| if (auto *BO = dyn_cast<BinaryOperator>(Cond)) |
| BinOpCode = BO->getOpcode(); |
| else |
| return nullptr; |
| |
| CmpInst::Predicate ExpectedPred, Pred1, Pred2; |
| if (BinOpCode == BinaryOperator::Or) { |
| ExpectedPred = ICmpInst::ICMP_NE; |
| } else if (BinOpCode == BinaryOperator::And) { |
| ExpectedPred = ICmpInst::ICMP_EQ; |
| } else |
| return nullptr; |
| |
| // %A = icmp eq %TV, %FV |
| // %B = icmp eq %X, %Y (and one of these is a select operand) |
| // %C = and %A, %B |
| // %D = select %C, %TV, %FV |
| // --> |
| // %FV |
| |
| // %A = icmp ne %TV, %FV |
| // %B = icmp ne %X, %Y (and one of these is a select operand) |
| // %C = or %A, %B |
| // %D = select %C, %TV, %FV |
| // --> |
| // %TV |
| Value *X, *Y; |
| if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal), |
| m_Specific(FalseVal)), |
| m_ICmp(Pred2, m_Value(X), m_Value(Y)))) || |
| Pred1 != Pred2 || Pred1 != ExpectedPred) |
| return nullptr; |
| |
| if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal) |
| return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal; |
| |
| return nullptr; |
| } |
| |
| /// For a boolean type or a vector of boolean type, return false or a vector |
| /// with every element false. |
| static Constant *getFalse(Type *Ty) { |
| return ConstantInt::getFalse(Ty); |
| } |
| |
| /// For a boolean type or a vector of boolean type, return true or a vector |
| /// with every element true. |
| static Constant *getTrue(Type *Ty) { |
| return ConstantInt::getTrue(Ty); |
| } |
| |
| /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"? |
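| /// For instance, "icmp sgt i32 %a, %b" is treated as equivalent to |
| /// "icmp slt i32 %b, %a": the predicate is swapped along with the operands. |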
| static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, |
| Value *RHS) { |
| CmpInst *Cmp = dyn_cast<CmpInst>(V); |
| if (!Cmp) |
| return false; |
| CmpInst::Predicate CPred = Cmp->getPredicate(); |
| Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1); |
| if (CPred == Pred && CLHS == LHS && CRHS == RHS) |
| return true; |
| return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS && |
| CRHS == LHS; |
| } |
| |
| /// Simplify comparison with true or false branch of select: |
| /// %sel = select i1 %cond, i32 %tv, i32 %fv |
| /// %cmp = icmp sle i32 %sel, %rhs |
| /// Compose new comparison by substituting %sel with either %tv or %fv |
| /// and see if it simplifies. |
| static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS, |
| Value *RHS, Value *Cond, |
| const SimplifyQuery &Q, unsigned MaxRecurse, |
| Constant *TrueOrFalse) { |
| Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse); |
| if (SimplifiedCmp == Cond) { |
| // %cmp simplified to the select condition (%cond). |
| return TrueOrFalse; |
| } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) { |
| // It didn't simplify. However, if the composed comparison is equivalent |
| // to the select condition (%cond), then we can replace it. |
| return TrueOrFalse; |
| } |
| return SimplifiedCmp; |
| } |
| |
| /// Simplify comparison with true branch of select |
| static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS, |
| Value *RHS, Value *Cond, |
| const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, |
| getTrue(Cond->getType())); |
| } |
| |
| /// Simplify comparison with false branch of select |
| static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS, |
| Value *RHS, Value *Cond, |
| const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, |
| getFalse(Cond->getType())); |
| } |
| |
| /// We know comparison with both branches of select can be simplified, but they |
| /// are not equal. This routine handles some logical simplifications. |
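| /// For instance, if the compare against the false arm folded to false and |
| /// the compare against the true arm folded to true, the whole compare is |
| /// just the select condition. |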
| static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, |
| Value *Cond, |
| const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // If the false value simplified to false, then the result of the compare |
| // is equal to "Cond && TCmp". This also catches the case when the false |
| // value simplified to false and the true value to true, returning "Cond". |
| // Folding select to and/or isn't poison-safe in general; impliesPoison |
| // checks whether folding it does not convert a well-defined value into |
| // poison. |
| if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond)) |
| if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse)) |
| return V; |
| // If the true value simplified to true, then the result of the compare |
| // is equal to "Cond || FCmp". |
| if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond)) |
| if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse)) |
| return V; |
| // Finally, if the false value simplified to true and the true value to |
| // false, then the result of the compare is equal to "!Cond". |
| if (match(FCmp, m_One()) && match(TCmp, m_Zero())) |
| if (Value *V = SimplifyXorInst( |
| Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse)) |
| return V; |
| return nullptr; |
| } |
| |
| /// Does the given value dominate the specified phi node? |
| static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) { |
| Instruction *I = dyn_cast<Instruction>(V); |
| if (!I) |
| // Arguments and constants dominate all instructions. |
| return true; |
| |
| // If we are processing instructions (and/or basic blocks) that have not been |
| // fully added to a function, the parent nodes may still be null. Simply |
| // return the conservative answer in these cases. |
| if (!I->getParent() || !P->getParent() || !I->getFunction()) |
| return false; |
| |
| // If we have a DominatorTree then do a precise test. |
| if (DT) |
| return DT->dominates(I, P); |
| |
| // Otherwise, if the instruction is in the entry block and is not an invoke |
| // nor a callbr, then it obviously dominates all phi nodes. |
| if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) && |
| !isa<CallBrInst>(I)) |
| return true; |
| |
| return false; |
| } |
| |
| /// Try to simplify a binary operator of form "V op OtherOp" where V is |
| /// "(B0 opex B1)" by distributing 'op' across 'opex' as |
| /// "(B0 op OtherOp) opex (B1 op OtherOp)". |
| static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V, |
| Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| auto *B = dyn_cast<BinaryOperator>(V); |
| if (!B || B->getOpcode() != OpcodeToExpand) |
| return nullptr; |
| Value *B0 = B->getOperand(0), *B1 = B->getOperand(1); |
| Value *L = SimplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), |
| MaxRecurse); |
| if (!L) |
| return nullptr; |
| Value *R = SimplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), |
| MaxRecurse); |
| if (!R) |
| return nullptr; |
| |
| // Does the expanded pair of binops simplify to the existing binop? |
| if ((L == B0 && R == B1) || |
| (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) { |
| ++NumExpand; |
| return B; |
| } |
| |
| // Otherwise, return "L op' R" if it simplifies. |
| Value *S = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse); |
| if (!S) |
| return nullptr; |
| |
| ++NumExpand; |
| return S; |
| } |
| |
| /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by |
| /// distributing op over op'. |
| static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, |
| Value *L, Value *R, |
| Instruction::BinaryOps OpcodeToExpand, |
| const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse)) |
| return V; |
| if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse)) |
| return V; |
| return nullptr; |
| } |
| |
| /// Generic simplifications for associative binary operations. |
| /// Returns the simpler value, or null if none was found. |
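| /// For instance, "(A & B) & B" reassociates as "A & (B & B)"; since "B & B" |
| /// is just B, the whole expression folds to the existing "A & B". |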
| static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode, |
| Value *LHS, Value *RHS, |
| const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| assert(Instruction::isAssociative(Opcode) && "Not an associative operation!"); |
| |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); |
| BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); |
| |
| // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely. |
| if (Op0 && Op0->getOpcode() == Opcode) { |
| Value *A = Op0->getOperand(0); |
| Value *B = Op0->getOperand(1); |
| Value *C = RHS; |
| |
| // Does "B op C" simplify? |
| if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { |
| // It does! Return "A op V" if it simplifies or is already available. |
| // If V equals B then "A op V" is just the LHS. |
| if (V == B) return LHS; |
| // Otherwise return "A op V" if it simplifies. |
| if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) { |
| ++NumReassoc; |
| return W; |
| } |
| } |
| } |
| |
| // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely. |
| if (Op1 && Op1->getOpcode() == Opcode) { |
| Value *A = LHS; |
| Value *B = Op1->getOperand(0); |
| Value *C = Op1->getOperand(1); |
| |
| // Does "A op B" simplify? |
| if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) { |
| // It does! Return "V op C" if it simplifies or is already available. |
| // If V equals B then "V op C" is just the RHS. |
| if (V == B) return RHS; |
| // Otherwise return "V op C" if it simplifies. |
| if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) { |
| ++NumReassoc; |
| return W; |
| } |
| } |
| } |
| |
| // The remaining transforms require commutativity as well as associativity. |
| if (!Instruction::isCommutative(Opcode)) |
| return nullptr; |
| |
| // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely. |
| if (Op0 && Op0->getOpcode() == Opcode) { |
| Value *A = Op0->getOperand(0); |
| Value *B = Op0->getOperand(1); |
| Value *C = RHS; |
| |
| // Does "C op A" simplify? |
| if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { |
| // It does! Return "V op B" if it simplifies or is already available. |
| // If V equals A then "V op B" is just the LHS. |
| if (V == A) return LHS; |
| // Otherwise return "V op B" if it simplifies. |
| if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) { |
| ++NumReassoc; |
| return W; |
| } |
| } |
| } |
| |
| // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely. |
| if (Op1 && Op1->getOpcode() == Opcode) { |
| Value *A = LHS; |
| Value *B = Op1->getOperand(0); |
| Value *C = Op1->getOperand(1); |
| |
| // Does "C op A" simplify? |
| if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { |
| // It does! Return "B op V" if it simplifies or is already available. |
| // If V equals C then "B op V" is just the RHS. |
| if (V == C) return RHS; |
| // Otherwise return "B op V" if it simplifies. |
| if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) { |
| ++NumReassoc; |
| return W; |
| } |
| } |
| } |
| |
| return nullptr; |
| } |
| |
| /// In the case of a binary operation with a select instruction as an operand, |
| /// try to simplify the binop by seeing whether evaluating it on both branches |
| /// of the select results in the same value. Returns the common value if so, |
| /// otherwise returns null. |
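| /// For instance, "and (select i1 %c, i32 8, i32 24), 7" evaluates to 0 on |
| /// both arms of the select, so the whole 'and' folds to 0. |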
| static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, |
| Value *RHS, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| SelectInst *SI; |
| if (isa<SelectInst>(LHS)) { |
| SI = cast<SelectInst>(LHS); |
| } else { |
| assert(isa<SelectInst>(RHS) && "No select instruction operand!"); |
| SI = cast<SelectInst>(RHS); |
| } |
| |
| // Evaluate the BinOp on the true and false branches of the select. |
| Value *TV; |
| Value *FV; |
| if (SI == LHS) { |
| TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse); |
| FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse); |
| } else { |
| TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse); |
| FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse); |
| } |
| |
| // If they simplified to the same value, then return the common value. |
| // If they both failed to simplify then return null. |
| if (TV == FV) |
| return TV; |
| |
| // If one branch simplified to undef, return the other one. |
| if (TV && Q.isUndefValue(TV)) |
| return FV; |
| if (FV && Q.isUndefValue(FV)) |
| return TV; |
| |
| // If applying the operation did not change the true and false select values, |
| // then the result of the binop is the select itself. |
| if (TV == SI->getTrueValue() && FV == SI->getFalseValue()) |
| return SI; |
| |
| // If one branch simplified and the other did not, and the simplified |
| // value is equal to the unsimplified one, return the simplified value. |
| // For example, select (cond, X, X & Z) & Z -> X & Z. |
| if ((FV && !TV) || (TV && !FV)) { |
| // Check that the simplified value has the form "X op Y" where "op" is the |
| // same as the original operation. |
| Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV); |
| if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) { |
| // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS". |
| // We already know that "op" is the same as for the simplified value. See |
| // if the operands match too. If so, return the simplified value. |
| Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue(); |
| Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS; |
| Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch; |
| if (Simplified->getOperand(0) == UnsimplifiedLHS && |
| Simplified->getOperand(1) == UnsimplifiedRHS) |
| return Simplified; |
| if (Simplified->isCommutative() && |
| Simplified->getOperand(1) == UnsimplifiedLHS && |
| Simplified->getOperand(0) == UnsimplifiedRHS) |
| return Simplified; |
| } |
| } |
| |
| return nullptr; |
| } |
| |
| /// In the case of a comparison with a select instruction, try to simplify the |
| /// comparison by seeing whether both branches of the select result in the same |
| /// value. Returns the common value if so, otherwise returns null. |
| /// For example, if we have: |
| /// %tmp = select i1 %cmp, i32 1, i32 2 |
| /// %cmp1 = icmp sle i32 %tmp, 3 |
| /// We can simplify %cmp1 to true, because both branches of select are |
| /// less than 3. We compose new comparison by substituting %tmp with both |
| /// branches of select and see if it can be simplified. |
| static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, |
| Value *RHS, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| // Make sure the select is on the LHS. |
| if (!isa<SelectInst>(LHS)) { |
| std::swap(LHS, RHS); |
| Pred = CmpInst::getSwappedPredicate(Pred); |
| } |
| assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!"); |
| SelectInst *SI = cast<SelectInst>(LHS); |
| Value *Cond = SI->getCondition(); |
| Value *TV = SI->getTrueValue(); |
| Value *FV = SI->getFalseValue(); |
| |
| // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it. |
| // Does "cmp TV, RHS" simplify? |
| Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse); |
| if (!TCmp) |
| return nullptr; |
| |
| // Does "cmp FV, RHS" simplify? |
| Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse); |
| if (!FCmp) |
| return nullptr; |
| |
| // If both sides simplified to the same value, then use it as the result of |
| // the original comparison. |
| if (TCmp == FCmp) |
| return TCmp; |
| |
| // The remaining cases only make sense if the select condition has the same |
| // type as the result of the comparison, so bail out if this is not so. |
| if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy()) |
| return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse); |
| |
| return nullptr; |
| } |
| |
| /// In the case of a binary operation with an operand that is a PHI instruction, |
| /// try to simplify the binop by seeing whether evaluating it on the incoming |
| /// phi values yields the same result for every value. If so returns the common |
| /// value, otherwise returns null. |
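| /// For instance, if %p is "phi i32 [ 4, %bb1 ], [ 12, %bb2 ]", then |
| /// "and i32 %p, 3" evaluates to 0 for every incoming value and folds to 0. |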
| static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, |
| Value *RHS, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| PHINode *PI; |
| if (isa<PHINode>(LHS)) { |
| PI = cast<PHINode>(LHS); |
| // Bail out if RHS and the phi may be mutually interdependent due to a loop. |
| if (!valueDominatesPHI(RHS, PI, Q.DT)) |
| return nullptr; |
| } else { |
| assert(isa<PHINode>(RHS) && "No PHI instruction operand!"); |
| PI = cast<PHINode>(RHS); |
| // Bail out if LHS and the phi may be mutually interdependent due to a loop. |
| if (!valueDominatesPHI(LHS, PI, Q.DT)) |
| return nullptr; |
| } |
| |
| // Evaluate the BinOp on the incoming phi values. |
| Value *CommonValue = nullptr; |
| for (Value *Incoming : PI->incoming_values()) { |
| // If the incoming value is the phi node itself, it can safely be skipped. |
| if (Incoming == PI) continue; |
| Value *V = PI == LHS ? |
| SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) : |
| SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse); |
| // If the operation failed to simplify, or simplified to a different value |
| // than it did previously, then give up. |
| if (!V || (CommonValue && V != CommonValue)) |
| return nullptr; |
| CommonValue = V; |
| } |
| |
| return CommonValue; |
| } |
| |
| /// In the case of a comparison with a PHI instruction, try to simplify the |
| /// comparison by seeing whether comparing with all of the incoming phi values |
| /// yields the same result every time. If so returns the common result, |
| /// otherwise returns null. |
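| /// For instance, if %p is "phi i32 [ 1, %bb1 ], [ 2, %bb2 ]", then |
| /// "icmp ult i32 %p, 4" is true for every incoming value and folds to true. |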
| static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return nullptr; |
| |
| // Make sure the phi is on the LHS. |
| if (!isa<PHINode>(LHS)) { |
| std::swap(LHS, RHS); |
| Pred = CmpInst::getSwappedPredicate(Pred); |
| } |
| assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!"); |
| PHINode *PI = cast<PHINode>(LHS); |
| |
| // Bail out if RHS and the phi may be mutually interdependent due to a loop. |
| if (!valueDominatesPHI(RHS, PI, Q.DT)) |
| return nullptr; |
| |
| // Evaluate the comparison on the incoming phi values. |
| Value *CommonValue = nullptr; |
| for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) { |
| Value *Incoming = PI->getIncomingValue(u); |
| Instruction *InTI = PI->getIncomingBlock(u)->getTerminator(); |
| // If the incoming value is the phi node itself, it can safely be skipped. |
| if (Incoming == PI) continue; |
| // Change the context instruction to the "edge" that flows into the phi. |
| // This is important because that is where incoming is actually "evaluated" |
| // even though it is used later somewhere else. |
| Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI), |
| MaxRecurse); |
| // If the operation failed to simplify, or simplified to a different value |
| // than it did previously, then give up. |
| if (!V || (CommonValue && V != CommonValue)) |
| return nullptr; |
| CommonValue = V; |
| } |
| |
| return CommonValue; |
| } |
| |
| static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode, |
| Value *&Op0, Value *&Op1, |
| const SimplifyQuery &Q) { |
| if (auto *CLHS = dyn_cast<Constant>(Op0)) { |
| if (auto *CRHS = dyn_cast<Constant>(Op1)) |
| return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL); |
| |
| // Canonicalize the constant to the RHS if this is a commutative operation. |
| if (Instruction::isCommutative(Opcode)) |
| std::swap(Op0, Op1); |
| } |
| return nullptr; |
| } |
| |
| /// Given operands for an Add, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q)) |
| return C; |
| |
| // X + undef -> undef |
| if (Q.isUndefValue(Op1)) |
| return Op1; |
| |
| // X + 0 -> X |
| if (match(Op1, m_Zero())) |
| return Op0; |
| |
| // If the two operands are negations of each other, return 0. |
| if (isKnownNegation(Op0, Op1)) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X + (Y - X) -> Y |
| // (Y - X) + X -> Y |
| // Eg: X + -X -> 0 |
| Value *Y = nullptr; |
| if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) || |
| match(Op0, m_Sub(m_Value(Y), m_Specific(Op1)))) |
| return Y; |
| |
| // X + ~X -> -1 since ~X = -X-1 |
| Type *Ty = Op0->getType(); |
| if (match(Op0, m_Not(m_Specific(Op1))) || |
| match(Op1, m_Not(m_Specific(Op0)))) |
| return Constant::getAllOnesValue(Ty); |
| |
| // add nsw/nuw (xor Y, signmask), signmask --> Y |
| // The no-wrapping add guarantees that the top bit will be set by the add. |
| // Therefore, the xor must be clearing the already set sign bit of Y. |
| if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) && |
| match(Op0, m_Xor(m_Value(Y), m_SignMask()))) |
| return Y; |
| |
| // add nuw %x, -1 -> -1, because %x can only be 0. |
| if (IsNUW && match(Op1, m_AllOnes())) |
| return Op1; // Which is -1. |
| |
| // i1 add -> xor. |
| if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) |
| if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) |
| return V; |
| |
| // Try some generic simplifications for associative operations. |
| if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // Threading Add over selects and phi nodes is pointless, so don't bother. |
| // Threading over the select in "A + select(cond, B, C)" means evaluating |
| // "A+B" and "A+C" and seeing if they are equal; but they are equal if and |
| // only if B and C are equal. If B and C are equal then (since we assume |
| // that operands have already been simplified) "select(cond, B, C)" should |
| // have been simplified to the common value of B and C already. Analysing |
| // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly |
| // for threading over phi nodes. |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
| const SimplifyQuery &Query) { |
| return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit); |
| } |
| |
| /// Compute the base pointer and cumulative constant offsets for V. |
| /// |
| /// This strips all constant offsets off of V, leaving it the base pointer, and |
| /// accumulates the total constant offset applied in the returned constant. |
| /// V must be a pointer or vector of pointers; the returned constant is '0' |
| /// if there are no constant offsets applied. |
| /// |
| /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't |
| /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc. |
| /// folding. |
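| /// For instance, for "getelementptr inbounds i32, i32* %p, i64 3", V is |
| /// stripped back to %p and the returned offset constant is 12 (three 4-byte |
| /// elements). |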
| static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, |
| bool AllowNonInbounds = false) { |
| assert(V->getType()->isPtrOrPtrVectorTy()); |
| |
| APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType())); |
| |
| V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds); |
| // As that strip may trace through `addrspacecast`, we need to sext or trunc |
| // the calculated offset. |
| Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType(); |
| Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth()); |
| |
| Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset); |
| if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) |
| return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr); |
| return OffsetIntPtr; |
| } |
| |
| /// Compute the constant difference between two pointer values. |
| /// If the difference is not a constant, returns null. |
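| /// For instance, the difference between "getelementptr i8, i8* %p, i64 10" |
| /// and "getelementptr i8, i8* %p, i64 3" is the constant 7; pointers that do |
| /// not share a common stripped base yield null. |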
| static Constant *computePointerDifference(const DataLayout &DL, Value *LHS, |
| Value *RHS) { |
| Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS); |
| Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS); |
| |
| // If LHS and RHS are not related via constant offsets to the same base |
| // value, there is nothing we can do here. |
| if (LHS != RHS) |
| return nullptr; |
| |
| // Otherwise, the difference of LHS - RHS can be computed as: |
| // LHS - RHS |
| // = (LHSOffset + Base) - (RHSOffset + Base) |
| // = LHSOffset - RHSOffset |
| return ConstantExpr::getSub(LHSOffset, RHSOffset); |
| } |
| |
| /// Given operands for a Sub, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q)) |
| return C; |
| |
| // X - poison -> poison |
| // poison - X -> poison |
| if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1)) |
| return PoisonValue::get(Op0->getType()); |
| |
| // X - undef -> undef |
| // undef - X -> undef |
| if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) |
| return UndefValue::get(Op0->getType()); |
| |
| // X - 0 -> X |
| if (match(Op1, m_Zero())) |
| return Op0; |
| |
| // X - X -> 0 |
| if (Op0 == Op1) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // Is this a negation? |
| if (match(Op0, m_Zero())) { |
| // 0 - X -> 0 if the sub is NUW. |
| if (isNUW) |
| return Constant::getNullValue(Op0->getType()); |
| |
| KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| if (Known.Zero.isMaxSignedValue()) { |
| // Op1 is either 0 or the minimum signed value. If the sub is NSW, then |
| // Op1 must be 0 because negating the minimum signed value is undefined. |
| if (isNSW) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // 0 - X -> X if X is 0 or the minimum signed value. |
| return Op1; |
| } |
| } |
| |
| // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies. |
| // For example, (X + Y) - Y -> X; (Y + X) - Y -> X |
| Value *X = nullptr, *Y = nullptr, *Z = Op1; |
| if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z |
| // See if "V === Y - Z" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1)) |
| // It does! Now see if "X + V" simplifies. |
| if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) { |
| // It does, we successfully reassociated! |
| ++NumReassoc; |
| return W; |
| } |
| // See if "V === X - Z" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) |
| // It does! Now see if "Y + V" simplifies. |
| if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) { |
| // It does, we successfully reassociated! |
| ++NumReassoc; |
| return W; |
| } |
| } |
| |
| // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies. |
| // For example, X - (X + 1) -> -1 |
| X = Op0; |
| if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z) |
| // See if "V === X - Y" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) |
| // It does! Now see if "V - Z" simplifies. |
| if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) { |
| // It does, we successfully reassociated! |
| ++NumReassoc; |
| return W; |
| } |
| // See if "V === X - Z" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) |
| // It does! Now see if "V - Y" simplifies. |
| if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) { |
| // It does, we successfully reassociated! |
| ++NumReassoc; |
| return W; |
| } |
| } |
| |
| // Z - (X - Y) -> (Z - X) + Y if everything simplifies. |
| // For example, X - (X - Y) -> Y. |
| Z = Op0; |
| if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y) |
| // See if "V === Z - X" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1)) |
| // It does! Now see if "V + Y" simplifies. |
| if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) { |
| // It does, we successfully reassociated! |
| ++NumReassoc; |
| return W; |
| } |
| |
| // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies. |
| if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) && |
| match(Op1, m_Trunc(m_Value(Y)))) |
| if (X->getType() == Y->getType()) |
| // See if "V === X - Y" simplifies. |
| if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) |
| // It does! Now see if "trunc V" simplifies. |
| if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(), |
| Q, MaxRecurse - 1)) |
| // It does, return the simplified "trunc V". |
| return W; |
| |
| // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...). |
| if (match(Op0, m_PtrToInt(m_Value(X))) && |
| match(Op1, m_PtrToInt(m_Value(Y)))) |
| if (Constant *Result = computePointerDifference(Q.DL, X, Y)) |
| return ConstantExpr::getIntegerCast(Result, Op0->getType(), true); |
| |
| // i1 sub -> xor. |
| if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) |
| if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) |
| return V; |
| |
| // Threading Sub over selects and phi nodes is pointless, so don't bother. |
| // Threading over the select in "A - select(cond, B, C)" means evaluating |
| // "A-B" and "A-C" and seeing if they are equal; but they are equal if and |
| // only if B and C are equal. If B and C are equal then (since we assume |
| // that operands have already been simplified) "select(cond, B, C)" should |
| // have been simplified to the common value of B and C already. Analysing |
| // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly |
| // for threading over phi nodes. |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, |
| const SimplifyQuery &Q) { |
| return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit); |
| } |
| |
| /// Given operands for a Mul, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q)) |
| return C; |
| |
| // X * poison -> poison |
| if (isa<PoisonValue>(Op1)) |
| return Op1; |
| |
| // X * undef -> 0 |
| // X * 0 -> 0 |
| if (Q.isUndefValue(Op1) || match(Op1, m_Zero())) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X * 1 -> X |
| if (match(Op1, m_One())) |
| return Op0; |
| |
| // (X / Y) * Y -> X if the division is exact. |
| Value *X = nullptr; |
| if (Q.IIQ.UseInstrInfo && |
| (match(Op0, |
| m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y |
| match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y) |
| return X; |
| |
| // i1 mul -> and. |
| if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) |
| if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1)) |
| return V; |
| |
| // Try some generic simplifications for associative operations. |
| if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // Mul distributes over Add. Try some generic simplifications based on this. |
| if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1, |
| Instruction::Add, Q, MaxRecurse)) |
| return V; |
| |
| // If the operation is with the result of a select instruction, check whether |
| // operating on either branch of the select always yields the same value. |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) |
| if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit); |
| } |
| |
| /// Check for common or similar folds of integer division or integer remainder. |
| /// This applies to all 4 opcodes (sdiv/udiv/srem/urem). |
| static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, |
| Value *Op1, const SimplifyQuery &Q) { |
| bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv); |
| bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem); |
| |
| Type *Ty = Op0->getType(); |
| |
| // X / undef -> poison |
| // X % undef -> poison |
| if (Q.isUndefValue(Op1)) |
| return PoisonValue::get(Ty); |
| |
| // X / 0 -> poison |
| // X % 0 -> poison |
| // We don't need to preserve faults! |
| if (match(Op1, m_Zero())) |
| return PoisonValue::get(Ty); |
| |
| // If any element of a fixed-width vector constant divisor is zero or undef, |
| // the behavior is undefined and we can fold the whole op to poison. |
| auto *Op1C = dyn_cast<Constant>(Op1); |
| auto *VTy = dyn_cast<FixedVectorType>(Ty); |
| if (Op1C && VTy) { |
| unsigned NumElts = VTy->getNumElements(); |
| for (unsigned i = 0; i != NumElts; ++i) { |
| Constant *Elt = Op1C->getAggregateElement(i); |
| if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt))) |
| return PoisonValue::get(Ty); |
| } |
| } |
| |
| // poison / X -> poison |
| // poison % X -> poison |
| if (isa<PoisonValue>(Op0)) |
| return Op0; |
| |
| // undef / X -> 0 |
| // undef % X -> 0 |
| if (Q.isUndefValue(Op0)) |
| return Constant::getNullValue(Ty); |
| |
| // 0 / X -> 0 |
| // 0 % X -> 0 |
| if (match(Op0, m_Zero())) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X / X -> 1 |
| // X % X -> 0 |
| if (Op0 == Op1) |
| return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty); |
| |
| // X / 1 -> X |
| // X % 1 -> 0 |
| // If this is a boolean op (single-bit element type), we can't have |
| // division-by-zero or remainder-by-zero, so assume the divisor is 1. |
| // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1. |
| Value *X; |
| if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) || |
| (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) |
| return IsDiv ? Op0 : Constant::getNullValue(Ty); |
| |
| // If X * Y does not overflow, then: |
| // X * Y / Y -> X |
| // X * Y % Y -> 0 |
| if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) { |
| auto *Mul = cast<OverflowingBinaryOperator>(Op0); |
| // The multiplication can't overflow if it is defined not to, or if |
| // X == A / Y for some A. |
| if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) || |
| (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) || |
| (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) || |
| (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) { |
| return IsDiv ? X : Constant::getNullValue(Op0->getType()); |
| } |
| } |
| |
| return nullptr; |
| } |
| |
| /// Given a predicate and two operands, return true if the comparison is true. |
| /// This is a helper for div/rem simplification where we return some other value |
| /// when we can prove a relationship between the operands. |
| static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse); |
| Constant *C = dyn_cast_or_null<Constant>(V); |
| return (C && C->isAllOnesValue()); |
| } |
| |
| /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer |
| /// to simplify X % Y to X. |
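| /// For instance, if %x is known to be unsigned-less-than %y, then |
| /// "udiv %x, %y" is 0 and the caller can fold "urem %x, %y" to %x. |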
| static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, |
| unsigned MaxRecurse, bool IsSigned) { |
| // Recursion is always used, so bail out at once if we already hit the limit. |
| if (!MaxRecurse--) |
| return false; |
| |
| if (IsSigned) { |
| // |X| / |Y| --> 0 |
| // |
| // We require that 1 operand is a simple constant. That could be extended to |
| // 2 variables if we computed the sign bit for each. |
| // |
| // Make sure that a constant is not the minimum signed value because taking |
| // the abs() of that is undefined. |
| Type *Ty = X->getType(); |
| const APInt *C; |
| if (match(X, m_APInt(C)) && !C->isMinSignedValue()) { |
| // Is the variable divisor magnitude always greater than the constant |
| // dividend magnitude? |
| // |Y| > |C| --> Y < -abs(C) or Y > abs(C) |
| Constant *PosDividendC = ConstantInt::get(Ty, C->abs()); |
| Constant *NegDividendC = ConstantInt::get(Ty, -C->abs()); |
| if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) || |
| isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse)) |
| return true; |
| } |
| if (match(Y, m_APInt(C))) { |
| // Special-case: we can't take the abs() of a minimum signed value. If |
| // that's the divisor, then all we have to do is prove that the dividend |
| // is also not the minimum signed value. |
| if (C->isMinSignedValue()) |
| return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse); |
| |
| // Is the variable dividend magnitude always less than the constant |
| // divisor magnitude? |
| // |X| < |C| --> X > -abs(C) and X < abs(C) |
| Constant *PosDivisorC = ConstantInt::get(Ty, C->abs()); |
| Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs()); |
| if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) && |
| isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse)) |
| return true; |
| } |
| return false; |
| } |
| |
| // IsSigned == false. |
| // Is the dividend unsigned less than the divisor? |
| return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse); |
| } |
| |
| /// These are simplifications common to SDiv and UDiv. |
| static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
| return C; |
| |
| if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q)) |
| return V; |
| |
| bool IsSigned = Opcode == Instruction::SDiv; |
| |
| // (X rem Y) / Y -> 0 |
| if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || |
| (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1))))) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // (X /u C1) /u C2 -> 0 if C1 * C2 overflow |
| ConstantInt *C1, *C2; |
| if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) && |
| match(Op1, m_ConstantInt(C2))) { |
| bool Overflow; |
| (void)C1->getValue().umul_ov(C2->getValue(), Overflow); |
| if (Overflow) |
| return Constant::getNullValue(Op0->getType()); |
| } |
| |
| // If the operation is with the result of a select instruction, check whether |
| // operating on either branch of the select always yields the same value. |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) |
| if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned)) |
| return Constant::getNullValue(Op0->getType()); |
| |
| return nullptr; |
| } |
| |
| /// These are simplifications common to SRem and URem. |
| static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
| return C; |
| |
| if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q)) |
| return V; |
| |
| // (X % Y) % Y -> X % Y |
| if ((Opcode == Instruction::SRem && |
| match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || |
| (Opcode == Instruction::URem && |
| match(Op0, m_URem(m_Value(), m_Specific(Op1))))) |
| return Op0; |
| |
| // (X << Y) % X -> 0 |
| if (Q.IIQ.UseInstrInfo && |
| ((Opcode == Instruction::SRem && |
| match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) || |
| (Opcode == Instruction::URem && |
| match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // If the operation is with the result of a select instruction, check whether |
| // operating on either branch of the select always yields the same value. |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) |
| if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| // If X / Y == 0, then X % Y == X. |
| if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem)) |
| return Op0; |
| |
| return nullptr; |
| } |
| |
| /// Given operands for an SDiv, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // If two operands are negated and no signed overflow, return -1. |
| if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true)) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse); |
| } |
| |
| Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit); |
| } |
| |
| /// Given operands for a UDiv, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse); |
| } |
| |
| Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit); |
| } |
| |
| /// Given operands for an SRem, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| // If the divisor is 0, the result is undefined, so assume the divisor is -1. |
| // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0 |
| Value *X; |
| if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) |
| return ConstantInt::getNullValue(Op0->getType()); |
| |
| // If the two operands are negated, return 0. |
| if (isKnownNegation(Op0, Op1)) |
| return ConstantInt::getNullValue(Op0->getType()); |
| |
| return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse); |
| } |
| |
| Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit); |
| } |
| |
| /// Given operands for a URem, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse); |
| } |
| |
| Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit); |
| } |
| |
| /// Returns true if a shift by \c Amount always yields poison. |
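| /// For example, "shl i32 %x, 35" always yields poison because the shift |
| /// amount is not less than the bit width. |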
| static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) { |
| Constant *C = dyn_cast<Constant>(Amount); |
| if (!C) |
| return false; |
| |
| // X shift by undef -> poison because it may shift by the bitwidth. |
| if (Q.isUndefValue(C)) |
| return true; |
| |
| // Shifting by the bitwidth or more is undefined. |
| if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) |
| if (CI->getValue().uge(CI->getType()->getScalarSizeInBits())) |
| return true; |
| |
| // If all lanes of a vector shift amount are poison, the whole shift is. |
| if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) { |
| for (unsigned I = 0, |
| E = cast<FixedVectorType>(C->getType())->getNumElements(); |
| I != E; ++I) |
| if (!isPoisonShift(C->getAggregateElement(I), Q)) |
| return false; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /// Given operands for an Shl, LShr or AShr, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0, |
| Value *Op1, bool IsNSW, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
| return C; |
| |
| // poison shift by X -> poison |
| if (isa<PoisonValue>(Op0)) |
| return Op0; |
| |
| // 0 shift by X -> 0 |
| if (match(Op0, m_Zero())) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X shift by 0 -> X |
| // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones |
| // would be poison. |
| Value *X; |
| if (match(Op1, m_Zero()) || |
| (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) |
| return Op0; |
| |
| // Fold undefined shifts. |
| if (isPoisonShift(Op1, Q)) |
| return PoisonValue::get(Op0->getType()); |
| |
| // If the operation is with the result of a select instruction, check whether |
| // operating on either branch of the select always yields the same value. |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) |
| if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| // If any bits in the shift amount make that value greater than or equal to |
| // the number of bits in the type, the shift produces poison. |
| KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth())) |
| return PoisonValue::get(Op0->getType()); |
| |
| // If all valid bits in the shift amount are known zero, the first operand is |
| // unchanged. |
| unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth()); |
| if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits) |
| return Op0; |
| |
| // Check for nsw shl leading to a poison value. |
| if (IsNSW) { |
| assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction"); |
| KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt); |
| |
| if (KnownVal.Zero.isSignBitSet()) |
| KnownShl.Zero.setSignBit(); |
| if (KnownVal.One.isSignBitSet()) |
| KnownShl.One.setSignBit(); |
| |
| if (KnownShl.hasConflict()) |
| return PoisonValue::get(Op0->getType()); |
| } |
| |
| return nullptr; |
| } |
| |
| /// Given operands for an LShr or AShr, see if we can |
| /// fold the result. If not, this returns null. |
| static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, |
| Value *Op1, bool isExact, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Value *V = |
| SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse)) |
| return V; |
| |
| // X >> X -> 0 |
| if (Op0 == Op1) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // undef >> X -> 0 |
| // undef >> X -> undef (if it's exact) |
| if (Q.isUndefValue(Op0)) |
| return isExact ? Op0 : Constant::getNullValue(Op0->getType()); |
| |
| // The low bit cannot be shifted out of an exact shift if it is set. |
| if (isExact) { |
| KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT); |
| if (Op0Known.One[0]) |
| return Op0; |
| } |
| |
| return nullptr; |
| } |
| |
| /// Given operands for an Shl, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Value *V = |
| SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse)) |
| return V; |
| |
| // undef << X -> 0 |
| // undef << X -> undef (if it's NSW/NUW) |
| if (Q.isUndefValue(Op0)) |
| return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType()); |
| |
| // (X >> A) << A -> X |
| Value *X; |
| if (Q.IIQ.UseInstrInfo && |
| match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1))))) |
| return X; |
| |
| // shl nuw i8 C, %x -> C iff C has sign bit set. |
| if (isNUW && match(Op0, m_Negative())) |
| return Op0; |
| // NOTE: could use computeKnownBits() / LazyValueInfo, |
| // but the cost-benefit analysis suggests it isn't worth it. |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, |
| const SimplifyQuery &Q) { |
| return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit); |
| } |
| |
| /// Given operands for an LShr, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q, |
| MaxRecurse)) |
| return V; |
| |
| // (X << A) >> A -> X |
| Value *X; |
| if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1)))) |
| return X; |
| |
| // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A. |
| // We can return X as we do in the above case since OR alters no bits in X. |
| // SimplifyDemandedBits in InstCombine can do more general optimization for |
| // bit manipulation. This pattern aims to provide opportunities for other |
| // optimizers by supporting a simple but common case in InstSimplify. |
| Value *Y; |
| const APInt *ShRAmt, *ShLAmt; |
| if (match(Op1, m_APInt(ShRAmt)) && |
| match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) && |
| *ShRAmt == *ShLAmt) { |
| const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| const unsigned EffWidthY = YKnown.countMaxActiveBits(); |
| if (ShRAmt->uge(EffWidthY)) |
| return X; |
| } |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, |
| const SimplifyQuery &Q) { |
| return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit); |
| } |
| |
| /// Given operands for an AShr, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, |
| const SimplifyQuery &Q, unsigned MaxRecurse) { |
| if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q, |
| MaxRecurse)) |
| return V; |
| |
| // -1 >>a X --> -1 |
| // (-1 << X) a>> X --> -1 |
| // Do not return Op0 because it may contain undef elements if it's a vector. |
| if (match(Op0, m_AllOnes()) || |
| match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| // (X << A) >> A -> X |
| Value *X; |
| if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1)))) |
| return X; |
| |
| // Arithmetic shifting an all-sign-bit value is a no-op. |
| unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| if (NumSignBits == Op0->getType()->getScalarSizeInBits()) |
| return Op0; |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, |
| const SimplifyQuery &Q) { |
| return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit); |
| } |
| |
| /// Commuted variants are assumed to be handled by calling this function again |
| /// with the parameters swapped. |
| static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, |
| ICmpInst *UnsignedICmp, bool IsAnd, |
| const SimplifyQuery &Q) { |
| Value *X, *Y; |
| |
| ICmpInst::Predicate EqPred; |
| if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) || |
| !ICmpInst::isEquality(EqPred)) |
| return nullptr; |
| |
| ICmpInst::Predicate UnsignedPred; |
| |
| Value *A, *B; |
| // Y = (A - B); |
| if (match(Y, m_Sub(m_Value(A), m_Value(B)))) { |
| if (match(UnsignedICmp, |
| m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) && |
| ICmpInst::isUnsigned(UnsignedPred)) { |
| // A >=/<= B || (A - B) != 0 <--> true |
| if ((UnsignedPred == ICmpInst::ICMP_UGE || |
| UnsignedPred == ICmpInst::ICMP_ULE) && |
| EqPred == ICmpInst::ICMP_NE && !IsAnd) |
| return ConstantInt::getTrue(UnsignedICmp->getType()); |
| // A </> B && (A - B) == 0 <--> false |
| if ((UnsignedPred == ICmpInst::ICMP_ULT || |
| UnsignedPred == ICmpInst::ICMP_UGT) && |
| EqPred == ICmpInst::ICMP_EQ && IsAnd) |
| return ConstantInt::getFalse(UnsignedICmp->getType()); |
| |
| // A </> B && (A - B) != 0 <--> A </> B |
| // A </> B || (A - B) != 0 <--> (A - B) != 0 |
| if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT || |
| UnsignedPred == ICmpInst::ICMP_UGT)) |
| return IsAnd ? UnsignedICmp : ZeroICmp; |
| |
| // A <=/>= B && (A - B) == 0 <--> (A - B) == 0 |
| // A <=/>= B || (A - B) == 0 <--> A <=/>= B |
| if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE || |
| UnsignedPred == ICmpInst::ICMP_UGE)) |
| return IsAnd ? ZeroICmp : UnsignedICmp; |
| } |
| |
| // Given Y = (A - B) |
| // Y >= A && Y != 0 --> Y >= A iff B != 0 |
| // Y < A || Y == 0 --> Y < A iff B != 0 |
| if (match(UnsignedICmp, |
| m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) { |
| if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd && |
| EqPred == ICmpInst::ICMP_NE && |
| isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) |
| return UnsignedICmp; |
| if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd && |
| EqPred == ICmpInst::ICMP_EQ && |
| isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) |
| return UnsignedICmp; |
| } |
| } |
| |
| if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) && |
| ICmpInst::isUnsigned(UnsignedPred)) |
| ; |
| else if (match(UnsignedICmp, |
| m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) && |
| ICmpInst::isUnsigned(UnsignedPred)) |
| UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); |
| else |
| return nullptr; |
| |
| // X > Y && Y == 0 --> Y == 0 iff X != 0 |
| // X > Y || Y == 0 --> X > Y iff X != 0 |
| if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && |
| isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) |
| return IsAnd ? ZeroICmp : UnsignedICmp; |
| |
| // X <= Y && Y != 0 --> X <= Y iff X != 0 |
| // X <= Y || Y != 0 --> Y != 0 iff X != 0 |
| if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && |
| isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) |
| return IsAnd ? UnsignedICmp : ZeroICmp; |
| |
| // The transforms below here are expected to be handled more generally with |
| // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's |
| // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap, |
| // these are candidates for removal. |
| |
| // X < Y && Y != 0 --> X < Y |
| // X < Y || Y != 0 --> Y != 0 |
| if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) |
| return IsAnd ? UnsignedICmp : ZeroICmp; |
| |
| // X >= Y && Y == 0 --> Y == 0 |
| // X >= Y || Y == 0 --> X >= Y |
| if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ) |
| return IsAnd ? ZeroICmp : UnsignedICmp; |
| |
| // X < Y && Y == 0 --> false |
| if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && |
| IsAnd) |
| return getFalse(UnsignedICmp->getType()); |
| |
| // X >= Y || Y != 0 --> true |
| if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE && |
| !IsAnd) |
| return getTrue(UnsignedICmp->getType()); |
| |
| return nullptr; |
| } |
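| |
| // A minimal illustrative sketch (not referenced by this file): brute-force |
| // confirmation of one identity used above, "A u>= B || (A - B) != 0" is |
| // always true, over every i8 operand pair. If A u< B then A != B, so the |
| // subtraction wraps to a nonzero value. |
| static bool exampleUnsignedRangeCheckIdentity() { |
| for (unsigned AV = 0; AV != 256; ++AV) |
| for (unsigned BV = 0; BV != 256; ++BV) { |
| APInt A(8, AV), B(8, BV); |
| if (!(A.uge(B) || (A - B) != 0)) |
| return false; |
| } |
| return true; |
| } |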
| |
| /// Commuted variants are assumed to be handled by calling this function again |
| /// with the parameters swapped. |
| static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { |
| ICmpInst::Predicate Pred0, Pred1; |
| Value *A, *B; |
| if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || |
| !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) |
| return nullptr; |
| |
| // We have (icmp Pred0, A, B) & (icmp Pred1, A, B). |
| // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we |
| // can eliminate Op1 from this 'and'. |
| if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) |
| return Op0; |
| |
| // Check for any combination of predicates that are guaranteed to be disjoint. |
| if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || |
| (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) || |
| (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) || |
| (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)) |
| return getFalse(Op0->getType()); |
| |
| return nullptr; |
| } |
| |
| /// Commuted variants are assumed to be handled by calling this function again |
| /// with the parameters swapped. |
| static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { |
| ICmpInst::Predicate Pred0, Pred1; |
| Value *A, *B; |
| if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || |
| !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) |
| return nullptr; |
| |
| // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). |
| // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we |
| // can eliminate Op0 from this 'or'. |
| if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) |
| return Op1; |
| |
| // Check for any combination of predicates that cover the entire range of |
| // possibilities. |
| if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || |
| (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || |
| (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || |
| (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) |
| return getTrue(Op0->getType()); |
| |
| return nullptr; |
| } |
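| |
| // A minimal illustrative sketch (not referenced by this file): the predicate |
| // implication query both helpers above rely on. "A == B" implies "A u<= B", |
| // so (A == B) | (A u<= B) keeps only the weaker compare. |
| static bool exampleImpliedPredicate() { |
| return ICmpInst::isImpliedTrueByMatchingCmp(ICmpInst::ICMP_EQ, |
| ICmpInst::ICMP_ULE); |
| } |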
| |
| /// Test if a pair of compares with a shared operand and 2 constants has an |
| /// empty set intersection, full set union, or if one compare is a superset of |
| /// the other. |
| static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, |
| bool IsAnd) { |
| // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)). |
| if (Cmp0->getOperand(0) != Cmp1->getOperand(0)) |
| return nullptr; |
| |
| const APInt *C0, *C1; |
| if (!match(Cmp0->getOperand(1), m_APInt(C0)) || |
| !match(Cmp1->getOperand(1), m_APInt(C1))) |
| return nullptr; |
| |
| auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0); |
| auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1); |
| |
| // For and-of-compares, check if the intersection is empty: |
| // (icmp X, C0) && (icmp X, C1) --> empty set --> false |
| if (IsAnd && Range0.intersectWith(Range1).isEmptySet()) |
| return getFalse(Cmp0->getType()); |
| |
| // For or-of-compares, check if the union is full: |
| // (icmp X, C0) || (icmp X, C1) --> full set --> true |
| if (!IsAnd && Range0.unionWith(Range1).isFullSet()) |
| return getTrue(Cmp0->getType()); |
| |
| // Is one range a superset of the other? |
| // If this is and-of-compares, take the smaller set: |
| // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 |
| // If this is or-of-compares, take the larger set: |
| // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 |
| if (Range0.contains(Range1)) |
| return IsAnd ? Cmp1 : Cmp0; |
| if (Range1.contains(Range0)) |
| return IsAnd ? Cmp0 : Cmp1; |
| |
| return nullptr; |
| } |
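| |
| // A minimal illustrative sketch (not referenced by this file): the |
| // ConstantRange algebra used above, on made-up i8 constants. "X u< 10 && |
| // X u> 20" has an empty intersection and folds to false, while "X s> 4 || |
| // X s< 42" covers every value and folds to true. |
| static bool exampleICmpConstantRanges() { |
| ConstantRange ULT10 = |
| ConstantRange::makeExactICmpRegion(ICmpInst::ICMP_ULT, APInt(8, 10)); |
| ConstantRange UGT20 = |
| ConstantRange::makeExactICmpRegion(ICmpInst::ICMP_UGT, APInt(8, 20)); |
| ConstantRange SGT4 = |
| ConstantRange::makeExactICmpRegion(ICmpInst::ICMP_SGT, APInt(8, 4)); |
| ConstantRange SLT42 = |
| ConstantRange::makeExactICmpRegion(ICmpInst::ICMP_SLT, APInt(8, 42)); |
| return ULT10.intersectWith(UGT20).isEmptySet() && |
| SGT4.unionWith(SLT42).isFullSet(); |
| } |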
| |
| static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1, |
| bool IsAnd) { |
| ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate(); |
| if (!match(Cmp0->getOperand(1), m_Zero()) || |
| !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1) |
| return nullptr; |
| |
| if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ)) |
| return nullptr; |
| |
| // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)". |
| Value *X = Cmp0->getOperand(0); |
| Value *Y = Cmp1->getOperand(0); |
| |
| // If one of the compares is a masked version of a (not) null check, then |
| // that compare implies the other, so we eliminate the other. Optionally, look |
| // through a pointer-to-int cast to match a null check of a pointer type. |
| |
| // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0 |
| // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0 |
| // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0 |
| // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0 |
| if (match(Y, m_c_And(m_Specific(X), m_Value())) || |
| match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value()))) |
| return Cmp1; |
| |
| // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0 |
| // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0 |
| // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0 |
| // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0 |
| if (match(X, m_c_And(m_Specific(Y), m_Value())) || |
| match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value()))) |
| return Cmp0; |
| |
| return nullptr; |
| } |
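| |
| // A minimal illustrative sketch (not referenced by this file): exhaustive i8 |
| // check that a masked non-null test implies the plain non-null test, which is |
| // why the weaker compare can be dropped above. |
| static bool exampleMaskedNonZeroImpliesNonZero() { |
| for (unsigned XV = 0; XV != 256; ++XV) |
| for (unsigned MV = 0; MV != 256; ++MV) { |
| APInt X(8, XV), M(8, MV); |
| if (!(X & M).isZero() && X.isZero()) |
| return false; |
| } |
| return true; |
| } |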
| |
| static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, |
| const InstrInfoQuery &IIQ) { |
| // (icmp (add V, C0), C1) & (icmp V, C0) |
| ICmpInst::Predicate Pred0, Pred1; |
| const APInt *C0, *C1; |
| Value *V; |
| if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) |
| return nullptr; |
| |
| if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) |
| return nullptr; |
| |
| auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0)); |
| if (AddInst->getOperand(1) != Op1->getOperand(1)) |
| return nullptr; |
| |
| Type *ITy = Op0->getType(); |
| bool isNSW = IIQ.hasNoSignedWrap(AddInst); |
| bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); |
| |
| const APInt Delta = *C1 - *C0; |
| if (C0->isStrictlyPositive()) { |
| if (Delta == 2) { |
| if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) |
| return getFalse(ITy); |
| if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) |
| return getFalse(ITy); |
| } |
| if (Delta == 1) { |
| if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) |
| return getFalse(ITy); |
| if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) |
| return getFalse(ITy); |
| } |
| } |
| if (C0->getBoolValue() && isNUW) { |
| if (Delta == 2) |
| if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) |
| return getFalse(ITy); |
| if (Delta == 1) |
| if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) |
| return getFalse(ITy); |
| } |
| |
| return nullptr; |
| } |
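| |
| // A minimal illustrative sketch (not referenced by this file): brute-force |
| // confirmation of one Delta == 1 case above for i8 with made-up constants |
| // C0 = 1 and C1 = 2: "(V + 1) u<= 2 && V s> 1" is unsatisfiable, so the 'and' |
| // folds to false. |
| static bool exampleAndOfICmpsWithAddIsFalse() { |
| APInt C0(8, 1), C1(8, 2); |
| for (unsigned I = 0; I != 256; ++I) { |
| APInt V(8, I); |
| if ((V + C0).ule(C1) && V.sgt(C0)) |
| return false; // A satisfying V would disprove the fold. |
| } |
| return true; |
| } |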
| |
| /// Try to eliminate compares with signed or unsigned min/max constants. |
| static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1, |
| bool IsAnd) { |
| // Canonicalize an equality compare as Cmp0. |
| if (Cmp1->isEquality()) |
| std::swap(Cmp0, Cmp1); |
| if (!Cmp0->isEquality()) |
| return nullptr; |
| |
| // The non-equality compare must include a common operand (X). Canonicalize |
| // the common operand as operand 0 (the predicate is swapped if the common |
| // operand was operand 1). |
| ICmpInst::Predicate Pred0 = Cmp0->getPredicate(); |
| Value *X = Cmp0->getOperand(0); |
| ICmpInst::Predicate Pred1; |
| bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value())); |
| if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value()))) |
| return nullptr; |
| if (ICmpInst::isEquality(Pred1)) |
| return nullptr; |
| |
| // The equality compare must be against a constant. Flip bits if we matched |
| // a bitwise not. Convert a null pointer constant to an integer zero value. |
| APInt MinMaxC; |
| const APInt *C; |
| if (match(Cmp0->getOperand(1), m_APInt(C))) |
| MinMaxC = HasNotOp ? ~*C : *C; |
| else if (isa<ConstantPointerNull>(Cmp0->getOperand(1))) |
| MinMaxC = APInt::getZero(8); |
| else |
| return nullptr; |
| |
| // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1. |
| if (!IsAnd) { |
| Pred0 = ICmpInst::getInversePredicate(Pred0); |
| Pred1 = ICmpInst::getInversePredicate(Pred1); |
| } |
| |
| // Normalize to unsigned compare and unsigned min/max value. |
| // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255 |
| if (ICmpInst::isSigned(Pred1)) { |
| Pred1 = ICmpInst::getUnsignedPredicate(Pred1); |
| MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth()); |
| } |
| |
| // (X != MAX) && (X < Y) --> X < Y |
| // (X == MAX) || (X >= Y) --> X >= Y |
| if (MinMaxC.isMaxValue()) |
| if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) |
| return Cmp1; |
| |
| // (X != MIN) && (X > Y) --> X > Y |
| // (X == MIN) || (X <= Y) --> X <= Y |
| if (MinMaxC.isMinValue()) |
| if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT) |
| return Cmp1; |
| |
| return nullptr; |
| } |
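| |
| // A minimal illustrative sketch (not referenced by this file): the |
| // signed-to-unsigned normalization used above, on i8. Adding the signed |
| // minimum maps the signed extremes onto the unsigned ones: -128 becomes the |
| // unsigned minimum 0 and 127 becomes the unsigned maximum 255. |
| static bool exampleSignedToUnsignedNormalization() { |
| APInt SMin = APInt::getSignedMinValue(8); |
| APInt SMax = APInt::getSignedMaxValue(8); |
| return (SMin + SMin).isMinValue() && (SMax + SMin).isMaxValue(); |
| } |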
| |
| static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, |
| const SimplifyQuery &Q) { |
| if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q)) |
| return X; |
| if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q)) |
| return X; |
| |
| if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) |
| return X; |
| if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true)) |
| return X; |
| |
| if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ)) |
| return X; |
| if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ)) |
| return X; |
| |
| return nullptr; |
| } |
| |
| static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, |
| const InstrInfoQuery &IIQ) { |
| // (icmp (add V, C0), C1) | (icmp V, C0) |
| ICmpInst::Predicate Pred0, Pred1; |
| const APInt *C0, *C1; |
| Value *V; |
| if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) |
| return nullptr; |
| |
| if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) |
| return nullptr; |
| |
| auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); |
| if (AddInst->getOperand(1) != Op1->getOperand(1)) |
| return nullptr; |
| |
| Type *ITy = Op0->getType(); |
| bool isNSW = IIQ.hasNoSignedWrap(AddInst); |
| bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); |
| |
| const APInt Delta = *C1 - *C0; |
| if (C0->isStrictlyPositive()) { |
| if (Delta == 2) { |
| if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) |
| return getTrue(ITy); |
| if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) |
| return getTrue(ITy); |
| } |
| if (Delta == 1) { |
| if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) |
| return getTrue(ITy); |
| if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) |
| return getTrue(ITy); |
| } |
| } |
| if (C0->getBoolValue() && isNUW) { |
| if (Delta == 2) |
| if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) |
| return getTrue(ITy); |
| if (Delta == 1) |
| if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) |
| return getTrue(ITy); |
| } |
| |
| return nullptr; |
| } |
| |
| static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, |
| const SimplifyQuery &Q) { |
| if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q)) |
| return X; |
| if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q)) |
| return X; |
| |
| if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) |
| return X; |
| if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false)) |
| return X; |
| |
| if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false)) |
| return X; |
| |
| if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ)) |
| return X; |
| if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ)) |
| return X; |
| |
| return nullptr; |
| } |
| |
| static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, |
| FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { |
| Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); |
| Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); |
| if (LHS0->getType() != RHS0->getType()) |
| return nullptr; |
| |
| FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); |
| if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || |
| (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) { |
| // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y |
| // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X |
| // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y |
| // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X |
| // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y |
| // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X |
| // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y |
| // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X |
| if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) || |
| (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1))) |
| return RHS; |
| |
| // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y |
| // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X |
| // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y |
| // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X |
| // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y |
| // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X |
| // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y |
| // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X |
| if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) || |
| (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1))) |
| return LHS; |
| } |
| |
| return nullptr; |
| } |
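| |
| // A minimal illustrative sketch (not referenced by this file): the NaN |
| // reasoning behind the ord/uno folds above, phrased with plain doubles. |
| // "fcmp ord L, R" is true exactly when neither operand is NaN, and NaN is the |
| // only value that compares unequal to itself. |
| static bool exampleOrdOfNeverNaN(double A, double X, double Y) { |
| auto Ord = [](double L, double R) { return L == L && R == R; }; |
| bool Conj = Ord(A, X) && Ord(X, Y); |
| // When A is known never to be NaN (the isKnownNeverNaN precondition), the |
| // conjunction is equivalent to the second compare alone. |
| return A != A || Conj == Ord(X, Y); |
| } |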
| |
| static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, |
| Value *Op0, Value *Op1, bool IsAnd) { |
| // Look through casts of the 'and' operands to find compares. |
| auto *Cast0 = dyn_cast<CastInst>(Op0); |
| auto *Cast1 = dyn_cast<CastInst>(Op1); |
| if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && |
| Cast0->getSrcTy() == Cast1->getSrcTy()) { |
| Op0 = Cast0->getOperand(0); |
| Op1 = Cast1->getOperand(0); |
| } |
| |
| Value *V = nullptr; |
| auto *ICmp0 = dyn_cast<ICmpInst>(Op0); |
| auto *ICmp1 = dyn_cast<ICmpInst>(Op1); |
| if (ICmp0 && ICmp1) |
| V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q) |
| : simplifyOrOfICmps(ICmp0, ICmp1, Q); |
| |
| auto *FCmp0 = dyn_cast<FCmpInst>(Op0); |
| auto *FCmp1 = dyn_cast<FCmpInst>(Op1); |
| if (FCmp0 && FCmp1) |
| V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd); |
| |
| if (!V) |
| return nullptr; |
| if (!Cast0) |
| return V; |
| |
| // If we looked through casts, we can only handle a constant simplification |
| // because we are not allowed to create a cast instruction here. |
| if (auto *C = dyn_cast<Constant>(V)) |
| return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType()); |
| |
| return nullptr; |
| } |
| |
| /// Given a bitwise logic op, check if the operands are add/sub with a common |
| /// source value and inverted constant (identity: C - X -> ~(X + ~C)). |
| static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1, |
| Instruction::BinaryOps Opcode) { |
| assert(Op0->getType() == Op1->getType() && "Mismatched binop types"); |
| assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op"); |
| Value *X; |
| Constant *C1, *C2; |
| if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) && |
| match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) || |
| (match(Op1, m_Add(m_Value(X), m_Constant(C1))) && |
| match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) { |
| if (ConstantExpr::getNot(C1) == C2) { |
| // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0 |
| // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1 |
| // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1 |
| Type *Ty = Op0->getType(); |
| return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty) |
| : ConstantInt::getAllOnesValue(Ty); |
| } |
| } |
| return nullptr; |
| } |
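| |
| // A minimal illustrative sketch (not referenced by this file): exhaustive i8 |
| // check of the identity behind the fold above. For any X and C, "~C - X" |
| // equals "~(X + C)", so the and/or/xor of the two operands collapse to |
| // 0 / -1 / -1 respectively. |
| static bool exampleAddSubNotIdentity() { |
| for (unsigned XV = 0; XV != 256; ++XV) |
| for (unsigned CV = 0; CV != 256; ++CV) { |
| APInt X(8, XV), C(8, CV); |
| if (~C - X != ~(X + C)) |
| return false; |
| } |
| return true; |
| } |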
| |
| /// Given operands for an And, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q)) |
| return C; |
| |
| // X & poison -> poison |
| if (isa<PoisonValue>(Op1)) |
| return Op1; |
| |
| // X & undef -> 0 |
| if (Q.isUndefValue(Op1)) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X & X = X |
| if (Op0 == Op1) |
| return Op0; |
| |
| // X & 0 = 0 |
| if (match(Op1, m_Zero())) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // X & -1 = X |
| if (match(Op1, m_AllOnes())) |
| return Op0; |
| |
| // A & ~A = ~A & A = 0 |
| if (match(Op0, m_Not(m_Specific(Op1))) || |
| match(Op1, m_Not(m_Specific(Op0)))) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // (A | ?) & A = A |
| if (match(Op0, m_c_Or(m_Specific(Op1), m_Value()))) |
| return Op1; |
| |
| // A & (A | ?) = A |
| if (match(Op1, m_c_Or(m_Specific(Op0), m_Value()))) |
| return Op0; |
| |
| // (X | Y) & (X | ~Y) --> X (commuted 8 ways) |
| Value *X, *Y; |
| if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) && |
| match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y)))) |
| return X; |
| if (match(Op1, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) && |
| match(Op0, m_c_Or(m_Deferred(X), m_Deferred(Y)))) |
| return X; |
| |
| if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And)) |
| return V; |
| |
| // A mask that only clears known zeros of a shifted value is a no-op. |
| const APInt *Mask; |
| const APInt *ShAmt; |
| if (match(Op1, m_APInt(Mask))) { |
| // If all bits in the inverted and shifted mask are clear: |
| // and (shl X, ShAmt), Mask --> shl X, ShAmt |
| if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && |
| (~(*Mask)).lshr(*ShAmt).isZero()) |
| return Op0; |
| |
| // If all bits in the inverted and shifted mask are clear: |
| // and (lshr X, ShAmt), Mask --> lshr X, ShAmt |
| if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && |
| (~(*Mask)).shl(*ShAmt).isZero()) |
| return Op0; |
| } |
| |
| // If we have a multiplication overflow check that is being 'and'ed with a |
| // check that one of the multipliers is not zero, we can omit the 'and', and |
| // only keep the overflow check. |
| if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true)) |
| return Op1; |
| if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true)) |
| return Op0; |
| |
| // A & (-A) = A if A is a power of two or zero. |
| if (match(Op0, m_Neg(m_Specific(Op1))) || |
| match(Op1, m_Neg(m_Specific(Op0)))) { |
| if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, |
| Q.DT)) |
| return Op0; |
| if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, |
| Q.DT)) |
| return Op1; |
| } |
| |
| // This is a similar pattern used for checking if a value is a power-of-2: |
| // (A - 1) & A --> 0 (if A is a power-of-2 or 0) |
| // A & (A - 1) --> 0 (if A is a power-of-2 or 0) |
| if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) && |
| isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) |
| return Constant::getNullValue(Op1->getType()); |
| if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) && |
| isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) |
| return Constant::getNullValue(Op0->getType()); |
| |
| if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true)) |
| return V; |
| |
| // Try some generic simplifications for associative operations. |
| if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // And distributes over Or. Try some generic simplifications based on this. |
| if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1, |
| Instruction::Or, Q, MaxRecurse)) |
| return V; |
| |
| // And distributes over Xor. Try some generic simplifications based on this. |
| if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1, |
| Instruction::Xor, Q, MaxRecurse)) |
| return V; |
| |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) { |
| if (Op0->getType()->isIntOrIntVectorTy(1)) { |
| // A & (A && B) -> A && B |
| if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero()))) |
| return Op1; |
| else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero()))) |
| return Op0; |
| } |
| // If the operation is with the result of a select instruction, check |
| // whether operating on either branch of the select always yields the same |
| // value. |
| if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| } |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // Assume the effective width of Y is not larger than A; then all bits from |
| // X and Y are disjoint in (X << A) | Y. If the mask of this 'and' covers all |
| // bits of one of X and Y while covering no bits of the other, the 'and' can |
| // be bypassed (see the sketch after this function). E.g., |
| // ((X << A) | Y) & Mask -> Y, |
| // if Mask = ((1 << effective_width_of(Y)) - 1) |
| // ((X << A) | Y) & Mask -> X << A, |
| // if Mask = ((1 << effective_width_of(X)) - 1) << A |
| // SimplifyDemandedBits in InstCombine can optimize the general case. |
| // This pattern aims to help other passes for a common case. |
| Value *XShifted; |
| if (match(Op1, m_APInt(Mask)) && |
| match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)), |
| m_Value(XShifted)), |
| m_Value(Y)))) { |
| const unsigned Width = Op0->getType()->getScalarSizeInBits(); |
| const unsigned ShftCnt = ShAmt->getLimitedValue(Width); |
| const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); |
| const unsigned EffWidthY = YKnown.countMaxActiveBits(); |
| if (EffWidthY <= ShftCnt) { |
| const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, |
| Q.DT); |
| const unsigned EffWidthX = XKnown.countMaxActiveBits(); |
| const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY); |
| const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt; |
| // If the mask is extracting all bits from X or Y as is, we can skip |
| // this AND op. |
| if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask)) |
| return Y; |
| if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask)) |
| return XShifted; |
| } |
| } |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit); |
| } |
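| |
| // A minimal illustrative sketch (not referenced by this file): the |
| // disjoint-bits masking fold above, replayed on concrete i8 values. With |
| // X = 3 shifted left by 4 (nuw) and Y = 5, a mask of 0x0F extracts exactly Y |
| // and a mask of 0xF0 extracts exactly X << 4. |
| static bool exampleDisjointOrMask() { |
| APInt X(8, 3), Y(8, 5); |
| APInt Shifted = X.shl(4); |
| APInt Or = Shifted | Y; |
| return (Or & APInt(8, 0x0F)) == Y && (Or & APInt(8, 0xF0)) == Shifted; |
| } |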
| |
| /// Given operands for an Or, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q)) |
| return C; |
| |
| // X | poison -> poison |
| if (isa<PoisonValue>(Op1)) |
| return Op1; |
| |
| // X | undef -> -1 |
| // X | -1 = -1 |
| // Do not return Op1 because it may contain undef elements if it's a vector. |
| if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes())) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| // X | X = X |
| // X | 0 = X |
| if (Op0 == Op1 || match(Op1, m_Zero())) |
| return Op0; |
| |
| // A | ~A = ~A | A = -1 |
| if (match(Op0, m_Not(m_Specific(Op1))) || |
| match(Op1, m_Not(m_Specific(Op0)))) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| // (A & ?) | A = A |
| if (match(Op0, m_c_And(m_Specific(Op1), m_Value()))) |
| return Op1; |
| |
| // A | (A & ?) = A |
| if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) |
| return Op0; |
| |
| // ~(A & ?) | A = -1 |
| if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value())))) |
| return Constant::getAllOnesValue(Op1->getType()); |
| |
| // A | ~(A & ?) = -1 |
| if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value())))) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or)) |
| return V; |
| |
| Value *A, *B, *NotA; |
| // (A & ~B) | (A ^ B) -> (A ^ B) |
| // (~B & A) | (A ^ B) -> (A ^ B) |
| // (A & ~B) | (B ^ A) -> (B ^ A) |
| // (~B & A) | (B ^ A) -> (B ^ A) |
| if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && |
| (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) || |
| match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))) |
| return Op1; |
| |
| // Commute the 'or' operands. |
| // (A ^ B) | (A & ~B) -> (A ^ B) |
| // (A ^ B) | (~B & A) -> (A ^ B) |
| // (B ^ A) | (A & ~B) -> (B ^ A) |
| // (B ^ A) | (~B & A) -> (B ^ A) |
| if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && |
| (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) || |
| match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))) |
| return Op0; |
| |
| // (A & B) | (~A ^ B) -> (~A ^ B) |
| // (B & A) | (~A ^ B) -> (~A ^ B) |
| // (A & B) | (B ^ ~A) -> (B ^ ~A) |
| // (B & A) | (B ^ ~A) -> (B ^ ~A) |
| if (match(Op0, m_And(m_Value(A), m_Value(B))) && |
| (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) || |
| match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B))))) |
| return Op1; |
| |
| // Commute the 'or' operands. |
| // (~A ^ B) | (A & B) -> (~A ^ B) |
| // (~A ^ B) | (B & A) -> (~A ^ B) |
| // (B ^ ~A) | (A & B) -> (B ^ ~A) |
| // (B ^ ~A) | (B & A) -> (B ^ ~A) |
| if (match(Op1, m_And(m_Value(A), m_Value(B))) && |
| (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) || |
| match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B))))) |
| return Op0; |
| |
| // (A | B) | (A ^ B) --> A | B |
| // (B | A) | (A ^ B) --> B | A |
| if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && |
| match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) |
| return Op0; |
| |
| // Commute the outer 'or' operands. |
| // (A ^ B) | (A | B) --> A | B |
| // (A ^ B) | (B | A) --> B | A |
| if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && |
| match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) |
| return Op1; |
| |
| // (~A & B) | ~(A | B) --> ~A |
| // (~A & B) | ~(B | A) --> ~A |
| // (B & ~A) | ~(A | B) --> ~A |
| // (B & ~A) | ~(B | A) --> ~A |
| if (match(Op0, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))), |
| m_Value(B))) && |
| match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) |
| return NotA; |
| |
| // Commute the 'or' operands. |
| // ~(A | B) | (~A & B) --> ~A |
| // ~(B | A) | (~A & B) --> ~A |
| // ~(A | B) | (B & ~A) --> ~A |
| // ~(B | A) | (B & ~A) --> ~A |
| if (match(Op1, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))), |
| m_Value(B))) && |
| match(Op0, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) |
| return NotA; |
| |
| // Rotated -1 is still -1: |
| // (-1 << X) | (-1 >> (C - X)) --> -1 |
| // (-1 >> X) | (-1 << (C - X)) --> -1 |
| // ...with C <= bitwidth (and commuted variants). |
| Value *X, *Y; |
| if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) && |
| match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) || |
| (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) && |
| match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) { |
| const APInt *C; |
| if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) || |
| match(Y, m_Sub(m_APInt(C), m_Specific(X)))) && |
| C->ule(X->getType()->getScalarSizeInBits())) { |
| return ConstantInt::getAllOnesValue(X->getType()); |
| } |
| } |
| |
| if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false)) |
| return V; |
| |
| // If we have a multiplication overflow check that is being 'or'ed with a |
| // check that one of the multipliers is not zero, we can omit the 'or', and |
| // only keep the overflow check. |
| if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false)) |
| return Op1; |
| if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false)) |
| return Op0; |
| |
| // Try some generic simplifications for associative operations. |
| if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // Or distributes over And. Try some generic simplifications based on this. |
| if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1, |
| Instruction::And, Q, MaxRecurse)) |
| return V; |
| |
| if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) { |
| if (Op0->getType()->isIntOrIntVectorTy(1)) { |
| // A | (A || B) -> A || B |
| if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value()))) |
| return Op1; |
| else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value()))) |
| return Op0; |
| } |
| // If the operation is with the result of a select instruction, check |
| // whether operating on either branch of the select always yields the same |
| // value. |
| if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| } |
| |
| // (A & C1)|(B & C2) |
| const APInt *C1, *C2; |
| if (match(Op0, m_And(m_Value(A), m_APInt(C1))) && |
| match(Op1, m_And(m_Value(B), m_APInt(C2)))) { |
| if (*C1 == ~*C2) { |
| // (A & C1)|(B & C2) |
| // If we have: ((V + N) & C1) | (V & C2) |
| // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 |
| // replace with V+N. |
| Value *N; |
| if (C2->isMask() && // C2 == 0+1+ |
| match(A, m_c_Add(m_Specific(B), m_Value(N)))) { |
| // Add commutes, try both ways. |
| if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) |
| return A; |
| } |
| // Or commutes, try both ways. |
| if (C1->isMask() && |
| match(B, m_c_Add(m_Specific(A), m_Value(N)))) { |
| // Add commutes, try both ways. |
| if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) |
| return B; |
| } |
| } |
| } |
| |
| // If the operation is with the result of a phi instruction, check whether |
| // operating on all incoming values of the phi always yields the same value. |
| if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) |
| if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) |
| return V; |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); |
| } |
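| |
| // A minimal illustrative sketch (not referenced by this file): brute-force |
| // check of the "(A & C1) | (B & C2)" fold above for i8 with made-up masks |
| // C2 = 0x0F and C1 = ~C2 = 0xF0. When A = B + N and N has no bits inside C2, |
| // the whole expression is just B + N. |
| static bool exampleOrOfMaskedAdd() { |
| APInt C2(8, 0x0F); |
| APInt C1 = ~C2; |
| for (unsigned BV = 0; BV != 256; ++BV) |
| for (unsigned NV = 0; NV != 256; NV += 16) { // Keep N & C2 == 0. |
| APInt B(8, BV), N(8, NV), A = B + N; |
| if (((A & C1) | (B & C2)) != A) |
| return false; |
| } |
| return true; |
| } |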
| |
| /// Given operands for a Xor, see if we can fold the result. |
| /// If not, this returns null. |
| static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
| unsigned MaxRecurse) { |
| if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) |
| return C; |
| |
| // A ^ undef -> undef |
| if (Q.isUndefValue(Op1)) |
| return Op1; |
| |
| // A ^ 0 = A |
| if (match(Op1, m_Zero())) |
| return Op0; |
| |
| // A ^ A = 0 |
| if (Op0 == Op1) |
| return Constant::getNullValue(Op0->getType()); |
| |
| // A ^ ~A = ~A ^ A = -1 |
| if (match(Op0, m_Not(m_Specific(Op1))) || |
| match(Op1, m_Not(m_Specific(Op0)))) |
| return Constant::getAllOnesValue(Op0->getType()); |
| |
| auto foldAndOrNot = [](Value *X, Value *Y) -> Value * { |
| Value *A, *B; |
| // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants. |
| if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) && |
| match(Y, m_c_Or(m_Specific(A), m_Specific(B)))) |
| return A; |
| |
| // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants. |
| // The 'not' op must contain a complete -1 operand (no undef elements for |
| // vector) for the transform to be safe. |
| Value *NotA; |
| const APInt *C; |
| if (match(X, m_c_Or(m_CombineAnd(m_Xor(m_Value(A), m_APIntForbidUndef(C)), |
| m_Value(NotA)), |
| m_Value(B))) && |
| match(Y, m_c_And(m_Specific(A), m_Specific(B))) && C->isAllOnes()) |
| return NotA; |
| |
| return nullptr; |
| }; |
| if (Value *R = foldAndOrNot(Op0, Op1)) |
| return R; |
| if (Value *R = foldAndOrNot(Op1, Op0)) |
| return R; |
| |
| if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor)) |
| return V; |
| |
| // Try some generic simplifications for associative operations. |
| if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, |
| MaxRecurse)) |
| return V; |
| |
| // Threading Xor over selects and phi nodes is pointless, so don't bother. |
| // Threading over the select in "A ^ select(cond, B, C)" means evaluating |
| // "A^B" and "A^C" and seeing if they are equal; but they are equal if and |
| // only if B and C are equal. If B and C are equal then (since we assume |
| // that operands have already been simplified) "select(cond, B, C)" should |
| // have been simplified to the common value of B and C already. Analysing |
| // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly |
| // for threading over phi nodes. |
| |
| return nullptr; |
| } |
| |
| Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
| return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit); |
| } |
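| |
| // A minimal illustrative sketch (not referenced by this file): exhaustive i8 |
| // check of the "(~A & B) ^ (A | B) --> A" identity folded above. |
| static bool exampleXorOfAndOrNot() { |
| for (unsigned AV = 0; AV != 256; ++AV) |
| for (unsigned BV = 0; BV != 256; ++BV) { |
| APInt A(8, AV), B(8, BV); |
| if (((~A & B) ^ (A | B)) != A) |
| return false; |
| } |
| return true; |
| } |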
| |
| static Type *GetCompareTy(Value *Op) { |
| return CmpInst::makeCmpResultType(Op->getType()); |
| } |
| |
| /// Rummage around inside V looking for something equivalent to the comparison |
| /// "LHS Pred RHS". Return such a value if found, otherwise return null. |
| /// Helper function for analyzing max/min idioms. |
| static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, |
| Value *LHS, Value *RHS) { |
| SelectInst *SI = dyn_cast<SelectInst>(V); |
| if (!SI) |
| return nullptr; |
| CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); |
| if (!Cmp) |
| return nullptr; |
| Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); |
| if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) |
| return Cmp; |
| if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && |
| LHS == CmpRHS && RHS == CmpLHS) |
| return Cmp; |
| return nullptr; |
| } |
| |
| // A significant optimization not implemented here is assuming that alloca |
| // addresses are not equal to incoming argument values. They don't *alias*, |
| // as we say, but that doesn't mean they aren't equal, so we take a |
| // conservative approach. |
| // |
| // This is inspired in part by C++11 5.10p1: |
| // "Two pointers of the same type compare equal if and only if they are both |
| // null, both point to the same function, or both represent the same |
| // address." |
| // |
| // This is pretty permissive. |
| // |
| // It's also partly due to C11 6.5.9p6: |
| // "Two pointers compare equal if and only if both are null pointers, both are |
| // pointers to the same object (including a pointer to an object and a |
| // subobject at its beginning) or function, both are pointers to one past the |
| // last element of the same array object, or one is a pointer to one past the |
| // end of one array object and the other is a pointer to the start of a |
| // different array object that happens to immediately follow the first array |
| // object in the address space." |
| // |
| // C11's version is more restrictive; however, there's no reason why an |
| // argument couldn't be a one-past-the-end value for a stack object in the |
| // caller and be equal to the beginning of a stack object in the callee. |
| // |
| // If the C and C++ standards are ever made sufficiently restrictive in this |
| // area, it may be possible to update LLVM's semantics accordingly and reinstate |
| // this optimization. |
| static Constant * |
| computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, |
| const SimplifyQuery &Q) { |
| const DataLayout &DL = Q.DL; |
| const TargetLibraryInfo *TLI = Q.TLI; |
| const DominatorTree *DT = Q.DT; |
| const Instruction *CxtI = Q.CxtI; |
| const InstrInfoQuery &IIQ = Q.IIQ; |
| |
| // First, skip past any trivial no-ops. |
| LHS = LHS->stripPointerCasts(); |
| RHS = RHS->stripPointerCasts(); |
| |
| // A non-null pointer is not equal to a null pointer. |
| if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) && |
| llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr, |
| IIQ.UseInstrInfo)) |
| return ConstantInt::get(GetCompareTy(LHS), |
| !CmpInst::isTrueWhenEqual(Pred)); |
| |
| // We can only fold certain predicates on pointer comparisons. |
| switch (Pred) { |
| default: |
| return nullptr; |
| |
| // Equality comparisons are easy to fold. |
| case CmpInst::ICMP_EQ: |
| case CmpInst::ICMP_NE: |
| break; |
| |
| // We can only handle unsigned relational comparisons because 'inbounds' on |
| // a GEP only protects against unsigned wrapping. |
| case CmpInst::ICMP_UGT: |
| case CmpInst::ICMP_UGE: |
|