| //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // See the Attributor.h file comment and the class descriptions in that file for |
| // more information. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "llvm/Transforms/IPO/Attributor.h" |
| |
| #include "llvm/ADT/SCCIterator.h" |
| #include "llvm/ADT/SmallPtrSet.h" |
| #include "llvm/ADT/Statistic.h" |
| #include "llvm/Analysis/AliasAnalysis.h" |
| #include "llvm/Analysis/AssumeBundleQueries.h" |
| #include "llvm/Analysis/AssumptionCache.h" |
| #include "llvm/Analysis/CaptureTracking.h" |
| #include "llvm/Analysis/LazyValueInfo.h" |
| #include "llvm/Analysis/MemoryBuiltins.h" |
| #include "llvm/Analysis/ScalarEvolution.h" |
| #include "llvm/Analysis/TargetTransformInfo.h" |
| #include "llvm/Analysis/ValueTracking.h" |
| #include "llvm/IR/IRBuilder.h" |
| #include "llvm/IR/Instruction.h" |
| #include "llvm/IR/IntrinsicInst.h" |
| #include "llvm/IR/NoFolder.h" |
| #include "llvm/Support/CommandLine.h" |
| #include "llvm/Transforms/IPO/ArgumentPromotion.h" |
| #include "llvm/Transforms/Utils/Local.h" |
| |
| #include <cassert> |
| |
| using namespace llvm; |
| |
| #define DEBUG_TYPE "attributor" |
| |
| static cl::opt<bool> ManifestInternal( |
| "attributor-manifest-internal", cl::Hidden, |
| cl::desc("Manifest Attributor internal string attributes."), |
| cl::init(false)); |
| |
| static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), |
| cl::Hidden); |
| |
| template <> |
| unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0; |
| |
| static cl::opt<unsigned, true> MaxPotentialValues( |
| "attributor-max-potential-values", cl::Hidden, |
| cl::desc("Maximum number of potential values to be " |
| "tracked for each position."), |
| cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), |
| cl::init(7)); |
| |
| STATISTIC(NumAAs, "Number of abstract attributes created"); |
| |
| // Some helper macros to deal with statistics tracking. |
| // |
| // Usage: |
| // For simple IR attribute tracking overload trackStatistics in the abstract |
| // attribute and choose the right STATS_DECLTRACK_********* macro, |
| // e.g.,: |
| // void trackStatistics() const override { |
| // STATS_DECLTRACK_ARG_ATTR(returned) |
| // } |
| // If there is a single "increment" site one can use the macro |
| // STATS_DECLTRACK with a custom message. If there are multiple increment |
| // sites, STATS_DECL and STATS_TRACK can also be used separately. |
| // |
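| // An illustrative sketch with two increment sites (all names hypothetical): |
| // STATS_DECL(rewritten, Function, "Number of functions rewritten") |
| // if (RewroteBody) |
| // STATS_TRACK(rewritten, Function) |
| // if (RewroteSignature) |
| // STATS_TRACK(rewritten, Function) |
| // |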
| #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \ |
| ("Number of " #TYPE " marked '" #NAME "'") |
| #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME |
| #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG); |
| #define STATS_DECL(NAME, TYPE, MSG) \ |
| STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG); |
| #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE)); |
| #define STATS_DECLTRACK(NAME, TYPE, MSG) \ |
| { \ |
| STATS_DECL(NAME, TYPE, MSG) \ |
| STATS_TRACK(NAME, TYPE) \ |
| } |
| #define STATS_DECLTRACK_ARG_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME)) |
| #define STATS_DECLTRACK_CSARG_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, CSArguments, \ |
| BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME)) |
| #define STATS_DECLTRACK_FN_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME)) |
| #define STATS_DECLTRACK_CS_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME)) |
| #define STATS_DECLTRACK_FNRET_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, FunctionReturn, \ |
| BUILD_STAT_MSG_IR_ATTR(function returns, NAME)) |
| #define STATS_DECLTRACK_CSRET_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, CSReturn, \ |
| BUILD_STAT_MSG_IR_ATTR(call site returns, NAME)) |
| #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \ |
| STATS_DECLTRACK(NAME, Floating, \ |
| ("Number of floating values known to be '" #NAME "'")) |
| |
| // Specialization of the operator<< for abstract attribute subclasses. This |
| // disambiguates situations where multiple operators are applicable. |
| namespace llvm { |
| #define PIPE_OPERATOR(CLASS) \ |
| raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \ |
| return OS << static_cast<const AbstractAttribute &>(AA); \ |
| } |
| |
| PIPE_OPERATOR(AAIsDead) |
| PIPE_OPERATOR(AANoUnwind) |
| PIPE_OPERATOR(AANoSync) |
| PIPE_OPERATOR(AANoRecurse) |
| PIPE_OPERATOR(AAWillReturn) |
| PIPE_OPERATOR(AANoReturn) |
| PIPE_OPERATOR(AAReturnedValues) |
| PIPE_OPERATOR(AANonNull) |
| PIPE_OPERATOR(AANoAlias) |
| PIPE_OPERATOR(AADereferenceable) |
| PIPE_OPERATOR(AAAlign) |
| PIPE_OPERATOR(AANoCapture) |
| PIPE_OPERATOR(AAValueSimplify) |
| PIPE_OPERATOR(AANoFree) |
| PIPE_OPERATOR(AAHeapToStack) |
| PIPE_OPERATOR(AAReachability) |
| PIPE_OPERATOR(AAMemoryBehavior) |
| PIPE_OPERATOR(AAMemoryLocation) |
| PIPE_OPERATOR(AAValueConstantRange) |
| PIPE_OPERATOR(AAPrivatizablePtr) |
| PIPE_OPERATOR(AAUndefinedBehavior) |
| PIPE_OPERATOR(AAPotentialValues) |
| PIPE_OPERATOR(AANoUndef) |
| |
| #undef PIPE_OPERATOR |
| } // namespace llvm |
| |
| namespace { |
| |
| static Optional<ConstantInt *> |
| getAssumedConstantInt(Attributor &A, const Value &V, |
| const AbstractAttribute &AA, |
| bool &UsedAssumedInformation) { |
| Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation); |
| if (C.hasValue()) |
| return dyn_cast_or_null<ConstantInt>(C.getValue()); |
| return llvm::None; |
| } |
| |
| /// Get the pointer operand of a memory accessing instruction. If \p I is not |
| /// a memory accessing instruction, return nullptr. If \p AllowVolatile is |
| /// false and the instruction is volatile, return nullptr. |
| static const Value *getPointerOperand(const Instruction *I, |
| bool AllowVolatile) { |
| if (!AllowVolatile && I->isVolatile()) |
| return nullptr; |
| |
| if (auto *LI = dyn_cast<LoadInst>(I)) |
| return LI->getPointerOperand(); |
| |
| if (auto *SI = dyn_cast<StoreInst>(I)) |
| return SI->getPointerOperand(); |
| |
| if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) |
| return CXI->getPointerOperand(); |
| |
| if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) |
| return RMWI->getPointerOperand(); |
| |
| return nullptr; |
| } |
| |
| /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and |
| /// advanced by \p Offset bytes. To aid later analysis the method tries to build |
| /// getelementptr instructions that traverse the natural type of \p Ptr if |
| /// possible. If that fails, the remaining offset is adjusted byte-wise, hence |
| /// through a cast to i8*. |
| /// |
| /// TODO: This could probably live somewhere more prominently if it doesn't |
| /// already exist. |
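| /// |
| /// For example (assuming a typical DataLayout), for \p PtrElemTy |
| /// { i32, { i64, i8 } } and \p Offset 16 the loop selects the indices 0, 1, 1 |
| /// (the inner i8) and no byte-wise remainder is left, while for \p Offset 12 |
| /// the indices 0, 1, 0 reach the inner i64 and the remaining 4 bytes are |
| /// applied through an i8* GEP. |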
| static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr, |
| int64_t Offset, IRBuilder<NoFolder> &IRB, |
| const DataLayout &DL) { |
| assert(Offset >= 0 && "Negative offset not supported yet!"); |
| LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset |
| << "-bytes as " << *ResTy << "\n"); |
| |
| if (Offset) { |
| SmallVector<Value *, 4> Indices; |
| std::string GEPName = Ptr->getName().str() + ".0"; |
| |
| // Add 0 index to look through the pointer. |
| assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) && |
| "Offset out of bounds"); |
| Indices.push_back(Constant::getNullValue(IRB.getInt32Ty())); |
| |
| Type *Ty = PtrElemTy; |
| do { |
| auto *STy = dyn_cast<StructType>(Ty); |
| if (!STy) |
| // Non-aggregate type, we cast and make byte-wise progress now. |
| break; |
| |
| const StructLayout *SL = DL.getStructLayout(STy); |
| if (int64_t(SL->getSizeInBytes()) < Offset) |
| break; |
| |
| uint64_t Idx = SL->getElementContainingOffset(Offset); |
| assert(Idx < STy->getNumElements() && "Offset calculation error!"); |
| uint64_t Rem = Offset - SL->getElementOffset(Idx); |
| Ty = STy->getElementType(Idx); |
| |
| LLVM_DEBUG(dbgs() << "Ty: " << *Ty << " Offset: " << Offset |
| << " Idx: " << Idx << " Rem: " << Rem << "\n"); |
| |
| GEPName += "." + std::to_string(Idx); |
| Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx)); |
| Offset = Rem; |
| } while (Offset); |
| |
| // Create a GEP for the indices collected above. |
| Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName); |
| |
| // If an offset is left we use byte-wise adjustment. |
| if (Offset) { |
| Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()); |
| Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset), |
| GEPName + ".b" + Twine(Offset)); |
| } |
| } |
| |
| // Ensure the result has the requested type. |
| Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast"); |
| |
| LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n"); |
| return Ptr; |
| } |
| |
| /// Recursively visit all values that might become \p IRP at some point. This |
| /// will be done by looking through cast instructions, selects, phis, and calls |
| /// with the "returned" attribute. Once we cannot look through the value any |
| /// further, the callback \p VisitValueCB is invoked and passed the current |
| /// value, the \p State, and a flag to indicate if we stripped anything. |
| /// Stripped means that we unpacked the value associated with \p IRP at least |
| /// once. Note that the value used for the callback may still be the value |
| /// associated with \p IRP (due to PHIs). To limit how much effort is invested, |
| /// we will never visit more values than specified by \p MaxValues. |
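| /// |
| /// A hypothetical invocation (all names illustrative): |
| /// auto VisitValueCB = [](Value &V, const Instruction *CtxI, MyState &S, |
| /// bool Stripped) -> bool { |
| /// S.unionAssumed(V); // hypothetical accumulation step |
| /// return S.isValidState(); |
| /// }; |
| /// genericValueTraversal<MyAA, MyState>(A, getIRPosition(), *this, S, |
| /// VisitValueCB, getCtxI()); |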
| template <typename AAType, typename StateTy> |
| static bool genericValueTraversal( |
| Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State, |
| function_ref<bool(Value &, const Instruction *, StateTy &, bool)> |
| VisitValueCB, |
| const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16, |
| function_ref<Value *(Value *)> StripCB = nullptr) { |
| |
| const AAIsDead *LivenessAA = nullptr; |
| if (IRP.getAnchorScope()) |
| LivenessAA = &A.getAAFor<AAIsDead>( |
| QueryingAA, IRPosition::function(*IRP.getAnchorScope()), |
| DepClassTy::NONE); |
| bool AnyDead = false; |
| |
| using Item = std::pair<Value *, const Instruction *>; |
| SmallSet<Item, 16> Visited; |
| SmallVector<Item, 16> Worklist; |
| Worklist.push_back({&IRP.getAssociatedValue(), CtxI}); |
| |
| int Iteration = 0; |
| do { |
| Item I = Worklist.pop_back_val(); |
| Value *V = I.first; |
| CtxI = I.second; |
| if (StripCB) |
| V = StripCB(V); |
| |
| // Check if we should process the current value. To prevent endless |
| // recursion, keep a record of the values we have followed. |
| if (!Visited.insert(I).second) |
| continue; |
| |
| // Make sure we limit the compile time for complex expressions. |
| if (Iteration++ >= MaxValues) |
| return false; |
| |
| // Explicitly look through calls with a "returned" attribute if we do |
| // not have a pointer, as stripPointerCasts only works on pointers. |
| Value *NewV = nullptr; |
| if (V->getType()->isPointerTy()) { |
| NewV = V->stripPointerCasts(); |
| } else { |
| auto *CB = dyn_cast<CallBase>(V); |
| if (CB && CB->getCalledFunction()) { |
| for (Argument &Arg : CB->getCalledFunction()->args()) |
| if (Arg.hasReturnedAttr()) { |
| NewV = CB->getArgOperand(Arg.getArgNo()); |
| break; |
| } |
| } |
| } |
| if (NewV && NewV != V) { |
| Worklist.push_back({NewV, CtxI}); |
| continue; |
| } |
| |
| // Look through select instructions, visit both potential values. |
| if (auto *SI = dyn_cast<SelectInst>(V)) { |
| Worklist.push_back({SI->getTrueValue(), CtxI}); |
| Worklist.push_back({SI->getFalseValue(), CtxI}); |
| continue; |
| } |
| |
| // Look through phi nodes, visit all live operands. |
| if (auto *PHI = dyn_cast<PHINode>(V)) { |
| assert(LivenessAA && |
| "Expected liveness in the presence of instructions!"); |
| for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { |
| BasicBlock *IncomingBB = PHI->getIncomingBlock(u); |
| if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA, |
| LivenessAA, |
| /* CheckBBLivenessOnly */ true)) { |
| AnyDead = true; |
| continue; |
| } |
| Worklist.push_back( |
| {PHI->getIncomingValue(u), IncomingBB->getTerminator()}); |
| } |
| continue; |
| } |
| |
| if (UseValueSimplify && !isa<Constant>(V)) { |
| bool UsedAssumedInformation = false; |
| Optional<Constant *> C = |
| A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation); |
| if (!C.hasValue()) |
| continue; |
| if (Value *NewV = C.getValue()) { |
| Worklist.push_back({NewV, CtxI}); |
| continue; |
| } |
| } |
| |
| // Once a leaf is reached we inform the user through the callback. |
| if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) |
| return false; |
| } while (!Worklist.empty()); |
| |
| // If we actually used liveness information, record a dependence. |
| if (AnyDead) |
| A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL); |
| |
| // All values have been visited. |
| return true; |
| } |
| |
| const Value *stripAndAccumulateMinimalOffsets( |
| Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val, |
| const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, |
| bool UseAssumed = false) { |
| |
| auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool { |
| const IRPosition &Pos = IRPosition::value(V); |
| // Only track dependence if we are going to use the assumed info. |
| const AAValueConstantRange &ValueConstantRangeAA = |
| A.getAAFor<AAValueConstantRange>(QueryingAA, Pos, |
| UseAssumed ? DepClassTy::OPTIONAL |
| : DepClassTy::NONE); |
| ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed() |
| : ValueConstantRangeAA.getKnown(); |
| // We can only use the lower part of the range because the upper part can |
| // be higher than what the value can really be. |
| ROffset = Range.getSignedMin(); |
| return true; |
| }; |
| |
| return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds, |
| AttributorAnalysis); |
| } |
| |
| static const Value *getMinimalBaseOfAccsesPointerOperand( |
| Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I, |
| int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) { |
| const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); |
| if (!Ptr) |
| return nullptr; |
| APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); |
| const Value *Base = stripAndAccumulateMinimalOffsets( |
| A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds); |
| |
| BytesOffset = OffsetAPInt.getSExtValue(); |
| return Base; |
| } |
| |
| static const Value * |
| getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset, |
| const DataLayout &DL, |
| bool AllowNonInbounds = false) { |
| const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); |
| if (!Ptr) |
| return nullptr; |
| |
| return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL, |
| AllowNonInbounds); |
| } |
| |
| /// Helper function to clamp a state \p S of type \p StateType with the |
| /// information in \p R and indicate/return if \p S did change (as in, the |
| /// update needs to be run again). |
| template <typename StateType> |
| ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) { |
| auto Assumed = S.getAssumed(); |
| S ^= R; |
| return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED |
| : ChangeStatus::CHANGED; |
| } |
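| // A typical (illustrative) use in an updateImpl looks like: |
| // StateType S = StateType::getBestState(this->getState()); |
| // ... accumulate information into S ... |
| // return clampStateAndIndicateChange(this->getState(), S); |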
| |
| /// Clamp the information known for all returned values of a function |
| /// (identified by \p QueryingAA) into \p S. |
| template <typename AAType, typename StateType = typename AAType::StateType> |
| static void clampReturnedValueStates( |
| Attributor &A, const AAType &QueryingAA, StateType &S, |
| const IRPosition::CallBaseContext *CBContext = nullptr) { |
| LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for " |
| << QueryingAA << " into " << S << "\n"); |
| |
| assert((QueryingAA.getIRPosition().getPositionKind() == |
| IRPosition::IRP_RETURNED || |
| QueryingAA.getIRPosition().getPositionKind() == |
| IRPosition::IRP_CALL_SITE_RETURNED) && |
| "Can only clamp returned value states for a function returned or call " |
| "site returned position!"); |
| |
| // Use an optional state as there might not be any return values, and we |
| // want to join (IntegerState::operator&) the states of all those we find. |
| Optional<StateType> T; |
| |
| // Callback for each possibly returned value. |
| auto CheckReturnValue = [&](Value &RV) -> bool { |
| const IRPosition &RVPos = IRPosition::value(RV, CBContext); |
| const AAType &AA = |
| A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED); |
| LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() |
| << " @ " << RVPos << "\n"); |
| const StateType &AAS = AA.getState(); |
| if (T.hasValue()) |
| *T &= AAS; |
| else |
| T = AAS; |
| LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T |
| << "\n"); |
| return T->isValidState(); |
| }; |
| |
| if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA)) |
| S.indicatePessimisticFixpoint(); |
| else if (T.hasValue()) |
| S ^= *T; |
| } |
| |
| /// Helper class for generic deduction: return value -> returned position. |
| template <typename AAType, typename BaseType, |
| typename StateType = typename BaseType::StateType, |
| bool PropagateCallBaseContext = false> |
| struct AAReturnedFromReturnedValues : public BaseType { |
| AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A) |
| : BaseType(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| StateType S(StateType::getBestState(this->getState())); |
| clampReturnedValueStates<AAType, StateType>( |
| A, *this, S, |
| PropagateCallBaseContext ? this->getCallBaseContext() : nullptr); |
| // TODO: If we know we visited all returned values, thus none are assumed |
| // dead, we can take the known information from the state T. |
| return clampStateAndIndicateChange<StateType>(this->getState(), S); |
| } |
| }; |
| |
| /// Clamp the information known at all call sites for a given argument |
| /// (identified by \p QueryingAA) into \p S. |
| template <typename AAType, typename StateType = typename AAType::StateType> |
| static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA, |
| StateType &S) { |
| LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for " |
| << QueryingAA << " into " << S << "\n"); |
| |
| assert(QueryingAA.getIRPosition().getPositionKind() == |
| IRPosition::IRP_ARGUMENT && |
| "Can only clamp call site argument states for an argument position!"); |
| |
| // Use an optional state as there might not be any call sites, and we want |
| // to join (IntegerState::operator&) the states of all those we find. |
| Optional<StateType> T; |
| |
| // The argument number which is also the call site argument number. |
| unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo(); |
| |
| auto CallSiteCheck = [&](AbstractCallSite ACS) { |
| const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); |
| // Check if a corresponding argument was found or if it is not associated |
| // (which can happen for callback calls). |
| if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
| return false; |
| |
| const AAType &AA = |
| A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED); |
| LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction() |
| << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n"); |
| const StateType &AAS = AA.getState(); |
| if (T.hasValue()) |
| *T &= AAS; |
| else |
| T = AAS; |
| LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T |
| << "\n"); |
| return T->isValidState(); |
| }; |
| |
| bool AllCallSitesKnown; |
| if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true, |
| AllCallSitesKnown)) |
| S.indicatePessimisticFixpoint(); |
| else if (T.hasValue()) |
| S ^= *T; |
| } |
| |
| /// This function is the bridge between argument position and the call base |
| /// context. |
| template <typename AAType, typename BaseType, |
| typename StateType = typename AAType::StateType> |
| bool getArgumentStateFromCallBaseContext(Attributor &A, |
| BaseType &QueryingAttribute, |
| IRPosition &Pos, StateType &State) { |
| assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) && |
| "Expected an 'argument' position !"); |
| const CallBase *CBContext = Pos.getCallBaseContext(); |
| if (!CBContext) |
| return false; |
| |
| int ArgNo = Pos.getCallSiteArgNo(); |
| assert(ArgNo >= 0 && "Invalid Arg No!"); |
| |
| const auto &AA = A.getAAFor<AAType>( |
| QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo), |
| DepClassTy::REQUIRED); |
| const StateType &CBArgumentState = |
| static_cast<const StateType &>(AA.getState()); |
| |
| LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument" |
| << "Position:" << Pos << "CB Arg state:" << CBArgumentState |
| << "\n"); |
| |
| // NOTE: If we want to do call site grouping it should happen here. |
| State ^= CBArgumentState; |
| return true; |
| } |
| |
| /// Helper class for generic deduction: call site argument -> argument position. |
| template <typename AAType, typename BaseType, |
| typename StateType = typename AAType::StateType, |
| bool BridgeCallBaseContext = false> |
| struct AAArgumentFromCallSiteArguments : public BaseType { |
| AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A) |
| : BaseType(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| StateType S = StateType::getBestState(this->getState()); |
| |
| if (BridgeCallBaseContext) { |
| bool Success = |
| getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>( |
| A, *this, this->getIRPosition(), S); |
| if (Success) |
| return clampStateAndIndicateChange<StateType>(this->getState(), S); |
| } |
| clampCallSiteArgumentStates<AAType, StateType>(A, *this, S); |
| |
| // TODO: If we know we visited all incoming values, thus none are assumed |
| // dead, we can take the known information from the state T. |
| return clampStateAndIndicateChange<StateType>(this->getState(), S); |
| } |
| }; |
| |
| /// Helper class for generic replication: function returned -> cs returned. |
| template <typename AAType, typename BaseType, |
| typename StateType = typename BaseType::StateType, |
| bool IntroduceCallBaseContext = false> |
| struct AACallSiteReturnedFromReturned : public BaseType { |
| AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A) |
| : BaseType(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| assert(this->getIRPosition().getPositionKind() == |
| IRPosition::IRP_CALL_SITE_RETURNED && |
| "Can only wrap function returned positions for call site returned " |
| "positions!"); |
| auto &S = this->getState(); |
| |
| const Function *AssociatedFunction = |
| this->getIRPosition().getAssociatedFunction(); |
| if (!AssociatedFunction) |
| return S.indicatePessimisticFixpoint(); |
| |
| CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue()); |
| if (IntroduceCallBaseContext) |
| LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" |
| << CBContext << "\n"); |
| |
| IRPosition FnPos = IRPosition::returned( |
| *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr); |
| const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(S, AA.getState()); |
| } |
| }; |
| |
| /// Helper function to accumulate uses. |
| template <class AAType, typename StateType = typename AAType::StateType> |
| static void followUsesInContext(AAType &AA, Attributor &A, |
| MustBeExecutedContextExplorer &Explorer, |
| const Instruction *CtxI, |
| SetVector<const Use *> &Uses, |
| StateType &State) { |
| auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI); |
| for (unsigned u = 0; u < Uses.size(); ++u) { |
| const Use *U = Uses[u]; |
| if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) { |
| bool Found = Explorer.findInContextOf(UserI, EIt, EEnd); |
| if (Found && AA.followUseInMBEC(A, U, UserI, State)) |
| for (const Use &Us : UserI->uses()) |
| Uses.insert(&Us); |
| } |
| } |
| } |
| |
| /// Use the must-be-executed-context around \p I to add information into \p S. |
| /// The AAType class is required to have a `followUseInMBEC` method with the |
| /// following signature and behaviour: |
| /// |
| /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
| /// StateType &State) |
| /// U - Underlying use. |
| /// I - The user of the \p U. |
| /// State - The state to update with the information found at the use. |
| /// Returns true if the value should be tracked transitively. |
| /// |
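| /// A hypothetical implementation in an AAType (helper name is illustrative): |
| /// |
| /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
| /// StateType &State) { |
| /// if (const auto *LI = dyn_cast<LoadInst>(I)) |
| /// if (LI->getPointerOperand() == U->get()) |
| /// State.takeKnownInfoFrom(*LI); // hypothetical state update |
| /// return true; // keep following the user's uses transitively |
| /// } |
| /// |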
| template <class AAType, typename StateType = typename AAType::StateType> |
| static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S, |
| Instruction &CtxI) { |
| |
| // Container for (transitive) uses of the associated value. |
| SetVector<const Use *> Uses; |
| for (const Use &U : AA.getIRPosition().getAssociatedValue().uses()) |
| Uses.insert(&U); |
| |
| MustBeExecutedContextExplorer &Explorer = |
| A.getInfoCache().getMustBeExecutedContextExplorer(); |
| |
| followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S); |
| |
| if (S.isAtFixpoint()) |
| return; |
| |
| SmallVector<const BranchInst *, 4> BrInsts; |
| auto Pred = [&](const Instruction *I) { |
| if (const BranchInst *Br = dyn_cast<BranchInst>(I)) |
| if (Br->isConditional()) |
| BrInsts.push_back(Br); |
| return true; |
| }; |
| |
| // Here, accumulate conditional branch instructions in the context. We |
| // explore the child paths and collect their known states. The disjunction |
| // of those states can then be merged into the known state of this position. |
| // Let ParentS_i be the state known for the i-th branch instruction in the |
| // context and ChildS_{i, j} the states of its successors: |
| // |
| // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1} |
| // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2} |
| // ... |
| // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m} |
| // |
| // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m |
| // |
| // FIXME: Currently, recursive branches are not handled. For example, we |
| // can't deduce that ptr must be dereferenced in the function below. |
| // |
| // void f(int a, int b, int *ptr) { |
| // if(a) |
| // if (b) { |
| // *ptr = 0; |
| // } else { |
| // *ptr = 1; |
| // } |
| // else { |
| // if (b) { |
| // *ptr = 0; |
| // } else { |
| // *ptr = 1; |
| // } |
| // } |
| // } |
| |
| Explorer.checkForAllContext(&CtxI, Pred); |
| for (const BranchInst *Br : BrInsts) { |
| StateType ParentState; |
| |
| // The known state of the parent is the conjunction of the children's |
| // known states, so it is initialized to the best state. |
| ParentState.indicateOptimisticFixpoint(); |
| |
| for (const BasicBlock *BB : Br->successors()) { |
| StateType ChildState; |
| |
| size_t BeforeSize = Uses.size(); |
| followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState); |
| |
| // Erase uses which only appear in the child. |
| for (auto It = Uses.begin() + BeforeSize; It != Uses.end();) |
| It = Uses.erase(It); |
| |
| ParentState &= ChildState; |
| } |
| |
| // Use only known state. |
| S += ParentState; |
| } |
| } |
| |
| /// -----------------------NoUnwind Function Attribute-------------------------- |
| |
| struct AANoUnwindImpl : AANoUnwind { |
| AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} |
| |
| const std::string getAsStr() const override { |
| return getAssumed() ? "nounwind" : "may-unwind"; |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| auto Opcodes = { |
| (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, |
| (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, |
| (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; |
| |
| auto CheckForNoUnwind = [&](Instruction &I) { |
| if (!I.mayThrow()) |
| return true; |
| |
| if (const auto *CB = dyn_cast<CallBase>(&I)) { |
| const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( |
| *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); |
| return NoUnwindAA.isAssumedNoUnwind(); |
| } |
| return false; |
| }; |
| |
| if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes)) |
| return indicatePessimisticFixpoint(); |
| |
| return ChangeStatus::UNCHANGED; |
| } |
| }; |
| |
| struct AANoUnwindFunction final : public AANoUnwindImpl { |
| AANoUnwindFunction(const IRPosition &IRP, Attributor &A) |
| : AANoUnwindImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) } |
| }; |
| |
| /// NoUnwind attribute deduction for a call site. |
| struct AANoUnwindCallSite final : AANoUnwindImpl { |
| AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) |
| : AANoUnwindImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoUnwindImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
| if (!F || F->isDeclaration()) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Function *F = getAssociatedFunction(); |
| const IRPosition &FnPos = IRPosition::function(*F); |
| auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), FnAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } |
| }; |
| |
| /// --------------------- Function Return Values ------------------------------- |
| |
| /// "Attribute" that collects all potential returned values and the return |
| /// instructions that they arise from. |
| /// |
| /// If there is a unique returned value R, the manifest method will: |
| /// - mark R with the "returned" attribute, if R is an argument. |
| class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { |
| |
| /// Mapping of values potentially returned by the associated function to the |
| /// return instructions that might return them. |
| MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; |
| |
| /// Mapping to remember the number of returned values for a call site such |
| /// that we can avoid updates if nothing changed. |
| DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA; |
| |
| /// Set of unresolved calls returned by the associated function. |
| SmallSetVector<CallBase *, 4> UnresolvedCalls; |
| |
| /// State flags |
| /// |
| ///{ |
| bool IsFixed = false; |
| bool IsValidState = true; |
| ///} |
| |
| public: |
| AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) |
| : AAReturnedValues(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| // Reset the state. |
| IsFixed = false; |
| IsValidState = true; |
| ReturnedValues.clear(); |
| |
| Function *F = getAssociatedFunction(); |
| if (!F || F->isDeclaration()) { |
| indicatePessimisticFixpoint(); |
| return; |
| } |
| assert(!F->getReturnType()->isVoidTy() && |
| "Did not expect a void return type!"); |
| |
| // The map from instruction opcodes to those instructions in the function. |
| auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); |
| |
| // Look through all arguments; if one is marked as returned, we are done. |
| for (Argument &Arg : F->args()) { |
| if (Arg.hasReturnedAttr()) { |
| auto &ReturnInstSet = ReturnedValues[&Arg]; |
| if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) |
| for (Instruction *RI : *Insts) |
| ReturnInstSet.insert(cast<ReturnInst>(RI)); |
| |
| indicateOptimisticFixpoint(); |
| return; |
| } |
| } |
| |
| if (!A.isFunctionIPOAmendable(*F)) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::manifest(...). |
| ChangeStatus manifest(Attributor &A) override; |
| |
| /// See AbstractAttribute::getState(...). |
| AbstractState &getState() override { return *this; } |
| |
| /// See AbstractAttribute::getState(...). |
| const AbstractState &getState() const override { return *this; } |
| |
| /// See AbstractAttribute::updateImpl(Attributor &A). |
| ChangeStatus updateImpl(Attributor &A) override; |
| |
| llvm::iterator_range<iterator> returned_values() override { |
| return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); |
| } |
| |
| llvm::iterator_range<const_iterator> returned_values() const override { |
| return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); |
| } |
| |
| const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override { |
| return UnresolvedCalls; |
| } |
| |
| /// Return the number of potential return values, -1 if unknown. |
| size_t getNumReturnValues() const override { |
| return isValidState() ? ReturnedValues.size() : -1; |
| } |
| |
| /// Return an assumed unique return value if a single candidate is found. If |
| /// there cannot be one, return nullptr. If it is not clear yet, return |
| /// llvm::None. |
| Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; |
| |
| /// See AbstractState::checkForAllReturnedValues(...). |
| bool checkForAllReturnedValuesAndReturnInsts( |
| function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) |
| const override; |
| |
| /// Pretty print the attribute similar to the IR representation. |
| const std::string getAsStr() const override; |
| |
| /// See AbstractState::isAtFixpoint(). |
| bool isAtFixpoint() const override { return IsFixed; } |
| |
| /// See AbstractState::isValidState(). |
| bool isValidState() const override { return IsValidState; } |
| |
| /// See AbstractState::indicateOptimisticFixpoint(...). |
| ChangeStatus indicateOptimisticFixpoint() override { |
| IsFixed = true; |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| ChangeStatus indicatePessimisticFixpoint() override { |
| IsFixed = true; |
| IsValidState = false; |
| return ChangeStatus::CHANGED; |
| } |
| }; |
| |
| ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { |
| ChangeStatus Changed = ChangeStatus::UNCHANGED; |
| |
| // Bookkeeping. |
| assert(isValidState()); |
| STATS_DECLTRACK(KnownReturnValues, FunctionReturn, |
| "Number of function with known return values"); |
| |
| // Check if we have an assumed unique return value that we could manifest. |
| Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); |
| |
| if (!UniqueRV.hasValue() || !UniqueRV.getValue()) |
| return Changed; |
| |
| // Bookkeeping. |
| STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, |
| "Number of function with unique return"); |
| |
| // Callback to replace the uses of CB with the constant C. |
| auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) { |
| if (CB.use_empty()) |
| return ChangeStatus::UNCHANGED; |
| if (A.changeValueAfterManifest(CB, C)) |
| return ChangeStatus::CHANGED; |
| return ChangeStatus::UNCHANGED; |
| }; |
| |
| // If the assumed unique return value is an argument, annotate it. |
| if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { |
| if (UniqueRVArg->getType()->canLosslesslyBitCastTo( |
| getAssociatedFunction()->getReturnType())) { |
| getIRPosition() = IRPosition::argument(*UniqueRVArg); |
| Changed = IRAttribute::manifest(A); |
| } |
| } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) { |
| // We can replace the returned value with the unique returned constant. |
| Value &AnchorValue = getAnchorValue(); |
| if (Function *F = dyn_cast<Function>(&AnchorValue)) { |
| for (const Use &U : F->uses()) |
| if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) |
| if (CB->isCallee(&U)) { |
| Constant *RVCCast = |
| CB->getType() == RVC->getType() |
| ? RVC |
| : ConstantExpr::getTruncOrBitCast(RVC, CB->getType()); |
| Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed; |
| } |
| } else { |
| assert(isa<CallBase>(AnchorValue) && |
| "Expcected a function or call base anchor!"); |
| Constant *RVCCast = |
| AnchorValue.getType() == RVC->getType() |
| ? RVC |
| : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType()); |
| Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast); |
| } |
| if (Changed == ChangeStatus::CHANGED) |
| STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn, |
| "Number of function returns replaced by constant return"); |
| } |
| |
| return Changed; |
| } |
| |
| const std::string AAReturnedValuesImpl::getAsStr() const { |
| return (isAtFixpoint() ? "returns(#" : "may-return(#") + |
| (isValidState() ? std::to_string(getNumReturnValues()) : "?") + |
| ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]"; |
| } |
| |
| Optional<Value *> |
| AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { |
| // If checkForAllReturnedValues provides a unique value, ignoring potential |
| // undef values that can also be present, it is assumed to be the actual |
| // return value and forwarded to the caller of this method. If there are |
| // multiple, a nullptr is returned indicating there cannot be a unique |
| // returned value. |
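| // For example, a function that returns either %x or undef has the assumed |
| // unique return value %x, while one that returns %x or %y has none (nullptr). |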
| Optional<Value *> UniqueRV; |
| |
| auto Pred = [&](Value &RV) -> bool { |
| // If we found a second returned value and neither the current nor the saved |
| // one is an undef, there is no unique returned value. Undefs are special |
| // since we can pretend they have any value. |
| if (UniqueRV.hasValue() && UniqueRV != &RV && |
| !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) { |
| UniqueRV = nullptr; |
| return false; |
| } |
| |
| // Do not overwrite a value with an undef. |
| if (!UniqueRV.hasValue() || !isa<UndefValue>(RV)) |
| UniqueRV = &RV; |
| |
| return true; |
| }; |
| |
| if (!A.checkForAllReturnedValues(Pred, *this)) |
| UniqueRV = nullptr; |
| |
| return UniqueRV; |
| } |
| |
| bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( |
| function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) |
| const { |
| if (!isValidState()) |
| return false; |
| |
| // Check all returned values but ignore call sites as long as we have not |
| // encountered an overdefined one during an update. |
| for (auto &It : ReturnedValues) { |
| Value *RV = It.first; |
| |
| CallBase *CB = dyn_cast<CallBase>(RV); |
| if (CB && !UnresolvedCalls.count(CB)) |
| continue; |
| |
| if (!Pred(*RV, It.second)) |
| return false; |
| } |
| |
| return true; |
| } |
| |
| ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { |
| size_t NumUnresolvedCalls = UnresolvedCalls.size(); |
| bool Changed = false; |
| |
| // State used in the value traversals starting in returned values. |
| struct RVState { |
| // The map in which we collect return values -> return instrs. |
| decltype(ReturnedValues) &RetValsMap; |
| // The flag to indicate a change. |
| bool &Changed; |
| // The return instrs we come from. |
| SmallSetVector<ReturnInst *, 4> RetInsts; |
| }; |
| |
| // Callback for a leaf value returned by the associated function. |
| auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS, |
| bool) -> bool { |
| auto Size = RVS.RetValsMap[&Val].size(); |
| RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end()); |
| bool Inserted = RVS.RetValsMap[&Val].size() != Size; |
| RVS.Changed |= Inserted; |
| LLVM_DEBUG({ |
| if (Inserted) |
| dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val |
| << " => " << RVS.RetInsts.size() << "\n"; |
| }); |
| return true; |
| }; |
| |
| // Helper method to invoke the generic value traversal. |
| auto VisitReturnedValue = [&](Value &RV, RVState &RVS, |
| const Instruction *CtxI) { |
| IRPosition RetValPos = IRPosition::value(RV); |
| return genericValueTraversal<AAReturnedValues, RVState>( |
| A, RetValPos, *this, RVS, VisitValueCB, CtxI, |
| /* UseValueSimplify */ false); |
| }; |
| |
| // Callback for all "return intructions" live in the associated function. |
| auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) { |
| ReturnInst &Ret = cast<ReturnInst>(I); |
| RVState RVS({ReturnedValues, Changed, {}}); |
| RVS.RetInsts.insert(&Ret); |
| return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I); |
| }; |
| |
| // Start by discovering returned values from all live return instructions in |
| // the associated function. |
| if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret})) |
| return indicatePessimisticFixpoint(); |
| |
| // Once returned values "directly" present in the code are handled we try to |
| // resolve returned calls. To avoid modifications to the ReturnedValues map |
| // while we iterate over it we kept record of potential new entries in a copy |
| // map, NewRVsMap. |
| decltype(ReturnedValues) NewRVsMap; |
| |
| auto HandleReturnValue = [&](Value *RV, |
| SmallSetVector<ReturnInst *, 4> &RIs) { |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #" |
| << RIs.size() << " RIs\n"); |
| CallBase *CB = dyn_cast<CallBase>(RV); |
| if (!CB || UnresolvedCalls.count(CB)) |
| return; |
| |
| if (!CB->getCalledFunction()) { |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB |
| << "\n"); |
| UnresolvedCalls.insert(CB); |
| return; |
| } |
| |
| // TODO: use the function scope once we have call site AAReturnedValues. |
| const auto &RetValAA = A.getAAFor<AAReturnedValues>( |
| *this, IRPosition::function(*CB->getCalledFunction()), |
| DepClassTy::REQUIRED); |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: " |
| << RetValAA << "\n"); |
| |
| // Skip dead ends; if we do not know anything about the returned call, we |
| // mark it as unresolved and it will stay that way. |
| if (!RetValAA.getState().isValidState()) { |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB |
| << "\n"); |
| UnresolvedCalls.insert(CB); |
| return; |
| } |
| |
| // Do not try to learn partial information. If the callee has unresolved |
| // return values we will treat the call as unresolved/opaque. |
| auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls(); |
| if (!RetValAAUnresolvedCalls.empty()) { |
| UnresolvedCalls.insert(CB); |
| return; |
| } |
| |
| // Now check if we can track transitively returned values. If possible, i.e., |
| // if all return values can be represented in the current scope, do so. |
| bool Unresolved = false; |
| for (auto &RetValAAIt : RetValAA.returned_values()) { |
| Value *RetVal = RetValAAIt.first; |
| if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) || |
| isa<Constant>(RetVal)) |
| continue; |
| // Anything that did not fit in the above categories cannot be resolved; |
| // mark the call as unresolved. |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value " |
| "cannot be translated: " |
| << *RetVal << "\n"); |
| UnresolvedCalls.insert(CB); |
| Unresolved = true; |
| break; |
| } |
| |
| if (Unresolved) |
| return; |
| |
| // Now track transitively returned values. |
| unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB]; |
| if (NumRetAA == RetValAA.getNumReturnValues()) { |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not " |
| "changed since it was seen last\n"); |
| return; |
| } |
| NumRetAA = RetValAA.getNumReturnValues(); |
| |
| for (auto &RetValAAIt : RetValAA.returned_values()) { |
| Value *RetVal = RetValAAIt.first; |
| if (Argument *Arg = dyn_cast<Argument>(RetVal)) { |
| // Arguments are mapped to call site operands and we begin the traversal |
| // again. |
| bool Unused = false; |
| RVState RVS({NewRVsMap, Unused, RetValAAIt.second}); |
| VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB); |
| continue; |
| } |
| if (isa<CallBase>(RetVal)) { |
| // Call sites are resolved by the callee attribute over time; there is |
| // nothing for us to do here. |
| continue; |
| } |
| if (isa<Constant>(RetVal)) { |
| // Constants are valid everywhere, we can simply take them. |
| NewRVsMap[RetVal].insert(RIs.begin(), RIs.end()); |
| continue; |
| } |
| } |
| }; |
| |
| for (auto &It : ReturnedValues) |
| HandleReturnValue(It.first, It.second); |
| |
| // Because processing the new information can again lead to new return values |
| // we have to be careful and iterate until the worklist is empty. The idea is |
| // that we are in a stable state at the end of an update. All return values |
| // have been handled and properly categorized. We might not update again if |
| // we have not requested a non-fix attribute so we cannot "wait" for the next |
| // update to analyze a new return value. |
| while (!NewRVsMap.empty()) { |
| auto It = std::move(NewRVsMap.back()); |
| NewRVsMap.pop_back(); |
| |
| assert(!It.second.empty() && "Entry does not add anything."); |
| auto &ReturnInsts = ReturnedValues[It.first]; |
| for (ReturnInst *RI : It.second) |
| if (ReturnInsts.insert(RI)) { |
| LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value " |
| << *It.first << " => " << *RI << "\n"); |
| HandleReturnValue(It.first, ReturnInsts); |
| Changed = true; |
| } |
| } |
| |
| Changed |= (NumUnresolvedCalls != UnresolvedCalls.size()); |
| return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
| } |
| |
| struct AAReturnedValuesFunction final : public AAReturnedValuesImpl { |
| AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A) |
| : AAReturnedValuesImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) } |
| }; |
| |
| /// Returned values information for a call site. |
| struct AAReturnedValuesCallSite final : AAReturnedValuesImpl { |
| AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A) |
| : AAReturnedValuesImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites instead of |
| // redirecting requests to the callee. |
| llvm_unreachable("Abstract attributes for returned values are not " |
| "supported for call sites yet!"); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| return indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override {} |
| }; |
| |
| /// ------------------------ NoSync Function Attribute ------------------------- |
| |
| struct AANoSyncImpl : AANoSync { |
| AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {} |
| |
| const std::string getAsStr() const override { |
| return getAssumed() ? "nosync" : "may-sync"; |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override; |
| |
| /// Helper function used to determine whether an instruction is non-relaxed |
| /// atomic, that is, an atomic instruction whose ordering is stronger than |
| /// unordered or monotonic. |
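| /// For example, a seq_cst atomicrmw is non-relaxed, while a monotonic atomic |
| /// load is not. |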
| static bool isNonRelaxedAtomic(Instruction *I); |
| |
| /// Helper function specific to intrinsics that are potentially volatile. |
| static bool isNoSyncIntrinsic(Instruction *I); |
| }; |
| |
| bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) { |
| if (!I->isAtomic()) |
| return false; |
| |
| if (auto *FI = dyn_cast<FenceInst>(I)) |
| // All legal orderings for fence are stronger than monotonic. |
| return FI->getSyncScopeID() != SyncScope::SingleThread; |
| else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) { |
| // Unordered is not a legal ordering for cmpxchg. |
| return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic || |
| AI->getFailureOrdering() != AtomicOrdering::Monotonic); |
| } |
| |
| AtomicOrdering Ordering; |
| switch (I->getOpcode()) { |
| case Instruction::AtomicRMW: |
| Ordering = cast<AtomicRMWInst>(I)->getOrdering(); |
| break; |
| case Instruction::Store: |
| Ordering = cast<StoreInst>(I)->getOrdering(); |
| break; |
| case Instruction::Load: |
| Ordering = cast<LoadInst>(I)->getOrdering(); |
| break; |
| default: |
| llvm_unreachable( |
| "New atomic operations need to be known in the attributor."); |
| } |
| |
| return (Ordering != AtomicOrdering::Unordered && |
| Ordering != AtomicOrdering::Monotonic); |
| } |
| |
| /// Return true if this intrinsic is nosync. This is only used for intrinsics |
| /// which would be nosync except that they have a volatile flag. All other |
| /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td. |
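| /// For example, a non-volatile llvm.memcpy is nosync, while a volatile one is |
| /// not. |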
| bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) { |
| if (auto *MI = dyn_cast<MemIntrinsic>(I)) |
| return !MI->isVolatile(); |
| return false; |
| } |
| |
| ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { |
| |
| auto CheckRWInstForNoSync = [&](Instruction &I) { |
| // We are looking for volatile instructions or non-relaxed atomics. |
| |
| if (const auto *CB = dyn_cast<CallBase>(&I)) { |
| if (CB->hasFnAttr(Attribute::NoSync)) |
| return true; |
| |
| if (isNoSyncIntrinsic(&I)) |
| return true; |
| |
| const auto &NoSyncAA = A.getAAFor<AANoSync>( |
| *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); |
| return NoSyncAA.isAssumedNoSync(); |
| } |
| |
| if (!I.isVolatile() && !isNonRelaxedAtomic(&I)) |
| return true; |
| |
| return false; |
| }; |
| |
| auto CheckForNoSync = [&](Instruction &I) { |
| // At this point we handled all read/write effects and they are all |
| // nosync, so they can be skipped. |
| if (I.mayReadOrWriteMemory()) |
| return true; |
| |
| // Non-convergent and readnone imply nosync. |
| return !cast<CallBase>(I).isConvergent(); |
| }; |
| |
| if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) || |
| !A.checkForAllCallLikeInstructions(CheckForNoSync, *this)) |
| return indicatePessimisticFixpoint(); |
| |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| struct AANoSyncFunction final : public AANoSyncImpl { |
| AANoSyncFunction(const IRPosition &IRP, Attributor &A) |
| : AANoSyncImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) } |
| }; |
| |
| /// NoSync attribute deduction for a call site. |
| struct AANoSyncCallSite final : AANoSyncImpl { |
| AANoSyncCallSite(const IRPosition &IRP, Attributor &A) |
| : AANoSyncImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoSyncImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
| if (!F || F->isDeclaration()) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Function *F = getAssociatedFunction(); |
| const IRPosition &FnPos = IRPosition::function(*F); |
| auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), FnAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); } |
| }; |
| |
| /// ------------------------ No-Free Attributes ---------------------------- |
| |
| struct AANoFreeImpl : public AANoFree { |
| AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| auto CheckForNoFree = [&](Instruction &I) { |
| const auto &CB = cast<CallBase>(I); |
| if (CB.hasFnAttr(Attribute::NoFree)) |
| return true; |
| |
| const auto &NoFreeAA = A.getAAFor<AANoFree>( |
| *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); |
| return NoFreeAA.isAssumedNoFree(); |
| }; |
| |
| if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this)) |
| return indicatePessimisticFixpoint(); |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| /// See AbstractAttribute::getAsStr(). |
| const std::string getAsStr() const override { |
| return getAssumed() ? "nofree" : "may-free"; |
| } |
| }; |
| |
| struct AANoFreeFunction final : public AANoFreeImpl { |
| AANoFreeFunction(const IRPosition &IRP, Attributor &A) |
| : AANoFreeImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) } |
| }; |
| |
| /// NoFree attribute deduction for a call site. |
| struct AANoFreeCallSite final : AANoFreeImpl { |
| AANoFreeCallSite(const IRPosition &IRP, Attributor &A) |
| : AANoFreeImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoFreeImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
| if (!F || F->isDeclaration()) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Function *F = getAssociatedFunction(); |
| const IRPosition &FnPos = IRPosition::function(*F); |
| auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), FnAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); } |
| }; |
| |
| /// NoFree attribute for floating values. |
| struct AANoFreeFloating : AANoFreeImpl { |
| AANoFreeFloating(const IRPosition &IRP, Attributor &A) |
| : AANoFreeImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| const IRPosition &IRP = getIRPosition(); |
| |
| const auto &NoFreeAA = A.getAAFor<AANoFree>( |
| *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL); |
| if (NoFreeAA.isAssumedNoFree()) |
| return ChangeStatus::UNCHANGED; |
| |
| Value &AssociatedValue = getIRPosition().getAssociatedValue(); |
| auto Pred = [&](const Use &U, bool &Follow) -> bool { |
| Instruction *UserI = cast<Instruction>(U.getUser()); |
| if (auto *CB = dyn_cast<CallBase>(UserI)) { |
| if (CB->isBundleOperand(&U)) |
| return false; |
| if (!CB->isArgOperand(&U)) |
| return true; |
| unsigned ArgNo = CB->getArgOperandNo(&U); |
| |
| const auto &NoFreeArg = A.getAAFor<AANoFree>( |
| *this, IRPosition::callsite_argument(*CB, ArgNo), |
| DepClassTy::REQUIRED); |
| return NoFreeArg.isAssumedNoFree(); |
| } |
| |
| if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || |
| isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { |
| Follow = true; |
| return true; |
| } |
| if (isa<ReturnInst>(UserI)) |
| return true; |
| |
| // Unknown user. |
| return false; |
| }; |
| if (!A.checkForAllUses(Pred, *this, AssociatedValue)) |
| return indicatePessimisticFixpoint(); |
| |
| return ChangeStatus::UNCHANGED; |
| } |
| }; |
| |
| /// NoFree attribute for function arguments. |
| struct AANoFreeArgument final : AANoFreeFloating { |
| AANoFreeArgument(const IRPosition &IRP, Attributor &A) |
| : AANoFreeFloating(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) } |
| }; |
| |
| /// NoFree attribute for call site arguments. |
| struct AANoFreeCallSiteArgument final : AANoFreeFloating { |
| AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) |
| : AANoFreeFloating(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Argument *Arg = getAssociatedArgument(); |
| if (!Arg) |
| return indicatePessimisticFixpoint(); |
| const IRPosition &ArgPos = IRPosition::argument(*Arg); |
| auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), ArgAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
| }; |
| |
| /// NoFree attribute for function return value. |
| struct AANoFreeReturned final : AANoFreeFloating { |
| AANoFreeReturned(const IRPosition &IRP, Attributor &A) |
| : AANoFreeFloating(IRP, A) { |
| llvm_unreachable("NoFree is not applicable to function returns!"); |
| } |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| llvm_unreachable("NoFree is not applicable to function returns!"); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| llvm_unreachable("NoFree is not applicable to function returns!"); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override {} |
| }; |
| |
| /// NoFree attribute deduction for a call site return value. |
| struct AANoFreeCallSiteReturned final : AANoFreeFloating { |
| AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) |
| : AANoFreeFloating(IRP, A) {} |
| |
| ChangeStatus manifest(Attributor &A) override { |
| return ChangeStatus::UNCHANGED; |
| } |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } |
| }; |
| |
| /// ------------------------ NonNull Argument Attribute ------------------------ |
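/// Deduce known nonnull-ness and dereferenceable bytes for \p AssociatedValue
/// from a single use \p U in the instruction \p I. As a sketch of the idea,
/// for
///   %v = load i32, i32* %p
/// in a function where null is not a valid pointer, the access lets us record
/// that %p is nonnull and dereferenceable(4) at this program point.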
| static int64_t getKnownNonNullAndDerefBytesForUse( |
| Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, |
| const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { |
| TrackUse = false; |
| |
| const Value *UseV = U->get(); |
| if (!UseV->getType()->isPointerTy()) |
| return 0; |
| |
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We could try to be smarter and avoid looking through things we
  // do not like for now, e.g., non-inbounds GEPs.
| if (isa<CastInst>(I)) { |
| TrackUse = true; |
| return 0; |
| } |
| |
| if (isa<GetElementPtrInst>(I)) { |
| TrackUse = true; |
| return 0; |
| } |
| |
| Type *PtrTy = UseV->getType(); |
| const Function *F = I->getFunction(); |
| bool NullPointerIsDefined = |
| F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; |
| const DataLayout &DL = A.getInfoCache().getDL(); |
| if (const auto *CB = dyn_cast<CallBase>(I)) { |
| if (CB->isBundleOperand(U)) { |
| if (RetainedKnowledge RK = getKnowledgeFromUse( |
| U, {Attribute::NonNull, Attribute::Dereferenceable})) { |
| IsNonNull |= |
| (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); |
| return RK.ArgValue; |
| } |
| return 0; |
| } |
| |
| if (CB->isCallee(U)) { |
| IsNonNull |= !NullPointerIsDefined; |
| return 0; |
| } |
| |
| unsigned ArgNo = CB->getArgOperandNo(U); |
| IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); |
| // As long as we only use known information there is no need to track |
| // dependences here. |
| auto &DerefAA = |
| A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE); |
| IsNonNull |= DerefAA.isKnownNonNull(); |
| return DerefAA.getKnownDereferenceableBytes(); |
| } |
| |
| int64_t Offset; |
| const Value *Base = |
| getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); |
| if (Base) { |
| if (Base == &AssociatedValue && |
| getPointerOperand(I, /* AllowVolatile */ false) == UseV) { |
| int64_t DerefBytes = |
| (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; |
| |
| IsNonNull |= !NullPointerIsDefined; |
| return std::max(int64_t(0), DerefBytes); |
| } |
| } |
| |
  // Corner case when the offset is 0.
| Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, |
| /*AllowNonInbounds*/ true); |
| if (Base) { |
| if (Offset == 0 && Base == &AssociatedValue && |
| getPointerOperand(I, /* AllowVolatile */ false) == UseV) { |
| int64_t DerefBytes = |
| (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); |
| IsNonNull |= !NullPointerIsDefined; |
| return std::max(int64_t(0), DerefBytes); |
| } |
| } |
| |
| return 0; |
| } |
| |
| struct AANonNullImpl : AANonNull { |
| AANonNullImpl(const IRPosition &IRP, Attributor &A) |
| : AANonNull(IRP, A), |
| NullIsDefined(NullPointerIsDefined( |
| getAnchorScope(), |
| getAssociatedValue().getType()->getPointerAddressSpace())) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| Value &V = getAssociatedValue(); |
| if (!NullIsDefined && |
| hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, |
| /* IgnoreSubsumingPositions */ false, &A)) { |
| indicateOptimisticFixpoint(); |
| return; |
| } |
| |
| if (isa<ConstantPointerNull>(V)) { |
| indicatePessimisticFixpoint(); |
| return; |
| } |
| |
| AANonNull::initialize(A); |
| |
| bool CanBeNull, CanBeFreed; |
| if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull, |
| CanBeFreed)) { |
| if (!CanBeNull) { |
| indicateOptimisticFixpoint(); |
| return; |
| } |
| } |
| |
| if (isa<GlobalValue>(&getAssociatedValue())) { |
| indicatePessimisticFixpoint(); |
| return; |
| } |
| |
| if (Instruction *CtxI = getCtxI()) |
| followUsesInMBEC(*this, A, getState(), *CtxI); |
| } |
| |
| /// See followUsesInMBEC |
| bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
| AANonNull::StateType &State) { |
| bool IsNonNull = false; |
| bool TrackUse = false; |
| getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, |
| IsNonNull, TrackUse); |
| State.setKnown(IsNonNull); |
| return TrackUse; |
| } |
| |
| /// See AbstractAttribute::getAsStr(). |
| const std::string getAsStr() const override { |
| return getAssumed() ? "nonnull" : "may-null"; |
| } |
| |
| /// Flag to determine if the underlying value can be null and still allow |
| /// valid accesses. |
| const bool NullIsDefined; |
| }; |
| |
| /// NonNull attribute for a floating value. |
| struct AANonNullFloating : public AANonNullImpl { |
| AANonNullFloating(const IRPosition &IRP, Attributor &A) |
| : AANonNullImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| const DataLayout &DL = A.getDataLayout(); |
| |
| DominatorTree *DT = nullptr; |
| AssumptionCache *AC = nullptr; |
| InformationCache &InfoCache = A.getInfoCache(); |
| if (const Function *Fn = getAnchorScope()) { |
| DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); |
| AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); |
| } |
| |
| auto VisitValueCB = [&](Value &V, const Instruction *CtxI, |
| AANonNull::StateType &T, bool Stripped) -> bool { |
| const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V), |
| DepClassTy::REQUIRED); |
| if (!Stripped && this == &AA) { |
| if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) |
| T.indicatePessimisticFixpoint(); |
| } else { |
| // Use abstract attribute information. |
| const AANonNull::StateType &NS = AA.getState(); |
| T ^= NS; |
| } |
| return T.isValidState(); |
| }; |
| |
| StateType T; |
| if (!genericValueTraversal<AANonNull, StateType>( |
| A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) |
| return indicatePessimisticFixpoint(); |
| |
| return clampStateAndIndicateChange(getState(), T); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
| }; |
| |
| /// NonNull attribute for function return value. |
| struct AANonNullReturned final |
| : AAReturnedFromReturnedValues<AANonNull, AANonNull> { |
| AANonNullReturned(const IRPosition &IRP, Attributor &A) |
| : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} |
| |
| /// See AbstractAttribute::getAsStr(). |
| const std::string getAsStr() const override { |
| return getAssumed() ? "nonnull" : "may-null"; |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } |
| }; |
| |
| /// NonNull attribute for function argument. |
| struct AANonNullArgument final |
| : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { |
| AANonNullArgument(const IRPosition &IRP, Attributor &A) |
| : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } |
| }; |
| |
| struct AANonNullCallSiteArgument final : AANonNullFloating { |
| AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) |
| : AANonNullFloating(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } |
| }; |
| |
| /// NonNull attribute for a call site return position. |
| struct AANonNullCallSiteReturned final |
| : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { |
| AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) |
| : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } |
| }; |
| |
| /// ------------------------ No-Recurse Attributes ---------------------------- |
| |
| struct AANoRecurseImpl : public AANoRecurse { |
| AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} |
| |
| /// See AbstractAttribute::getAsStr() |
| const std::string getAsStr() const override { |
| return getAssumed() ? "norecurse" : "may-recurse"; |
| } |
| }; |
| |
| struct AANoRecurseFunction final : AANoRecurseImpl { |
| AANoRecurseFunction(const IRPosition &IRP, Attributor &A) |
| : AANoRecurseImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoRecurseImpl::initialize(A); |
| if (const Function *F = getAnchorScope()) |
| if (A.getInfoCache().getSccSize(*F) != 1) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| |
| // If all live call sites are known to be no-recurse, we are as well. |
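    // If every function containing a live call site of this one is known to
    // be norecurse, this function cannot recurse either: any call path from
    // this function back to itself would have to pass through such a caller
    // and would make that caller recursive as well.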
| auto CallSitePred = [&](AbstractCallSite ACS) { |
| const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( |
| *this, IRPosition::function(*ACS.getInstruction()->getFunction()), |
| DepClassTy::NONE); |
| return NoRecurseAA.isKnownNoRecurse(); |
| }; |
| bool AllCallSitesKnown; |
| if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { |
| // If we know all call sites and all are known no-recurse, we are done. |
| // If all known call sites, which might not be all that exist, are known |
| // to be no-recurse, we are not done but we can continue to assume |
      // no-recurse. If one of the call sites we have not visited becomes
      // live, another update is triggered.
| if (AllCallSitesKnown) |
| indicateOptimisticFixpoint(); |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| // If the above check does not hold anymore we look at the calls. |
| auto CheckForNoRecurse = [&](Instruction &I) { |
| const auto &CB = cast<CallBase>(I); |
| if (CB.hasFnAttr(Attribute::NoRecurse)) |
| return true; |
| |
| const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( |
| *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); |
| if (!NoRecurseAA.isAssumedNoRecurse()) |
| return false; |
| |
| // Recursion to the same function |
| if (CB.getCalledFunction() == getAnchorScope()) |
| return false; |
| |
| return true; |
| }; |
| |
| if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this)) |
| return indicatePessimisticFixpoint(); |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } |
| }; |
| |
/// NoRecurse attribute deduction for a call site.
| struct AANoRecurseCallSite final : AANoRecurseImpl { |
| AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) |
| : AANoRecurseImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoRecurseImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
| if (!F || F->isDeclaration()) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Function *F = getAssociatedFunction(); |
| const IRPosition &FnPos = IRPosition::function(*F); |
| auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), FnAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); } |
| }; |
| |
| /// -------------------- Undefined-Behavior Attributes ------------------------ |
| |
| struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { |
| AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A) |
| : AAUndefinedBehavior(IRP, A) {} |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| const size_t UBPrevSize = KnownUBInsts.size(); |
| const size_t NoUBPrevSize = AssumedNoUBInsts.size(); |
| |
| auto InspectMemAccessInstForUB = [&](Instruction &I) { |
| // Skip instructions that are already saved. |
| if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
| return true; |
| |
      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should give us.
| const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); |
| assert(PtrOp && |
| "Expected pointer operand of memory accessing instruction"); |
| |
| // Either we stopped and the appropriate action was taken, |
| // or we got back a simplified value to continue. |
| Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); |
| if (!SimplifiedPtrOp.hasValue()) |
| return true; |
| const Value *PtrOpVal = SimplifiedPtrOp.getValue(); |
| |
      // A memory access through a pointer is considered UB
      // only if the pointer is a constant null value.
      // TODO: Expand it to not only check constant values.
| if (!isa<ConstantPointerNull>(PtrOpVal)) { |
| AssumedNoUBInsts.insert(&I); |
| return true; |
| } |
| const Type *PtrTy = PtrOpVal->getType(); |
| |
| // Because we only consider instructions inside functions, |
| // assume that a parent function exists. |
| const Function *F = I.getFunction(); |
| |
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
| if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) |
| AssumedNoUBInsts.insert(&I); |
| else |
| KnownUBInsts.insert(&I); |
| return true; |
| }; |
| |
| auto InspectBrInstForUB = [&](Instruction &I) { |
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.
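      // E.g., `br i1 undef, label %t, label %f` is known UB, whereas an
      // unconditional branch never is.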
| |
| // Skip instructions that are already saved. |
| if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
| return true; |
| |
| // We know we have a branch instruction. |
| auto BrInst = cast<BranchInst>(&I); |
| |
| // Unconditional branches are never considered UB. |
| if (BrInst->isUnconditional()) |
| return true; |
| |
| // Either we stopped and the appropriate action was taken, |
| // or we got back a simplified value to continue. |
| Optional<Value *> SimplifiedCond = |
| stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); |
| if (!SimplifiedCond.hasValue()) |
| return true; |
| AssumedNoUBInsts.insert(&I); |
| return true; |
| }; |
| |
| auto InspectCallSiteForUB = [&](Instruction &I) { |
      // Check whether a call site always causes UB or not.
| |
| // Skip instructions that are already saved. |
| if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
| return true; |
| |
| // Check nonnull and noundef argument attribute violation for each |
| // callsite. |
| CallBase &CB = cast<CallBase>(I); |
| Function *Callee = CB.getCalledFunction(); |
| if (!Callee) |
| return true; |
| for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) { |
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // call site is considered UB.
| if (idx >= Callee->arg_size()) |
| break; |
| Value *ArgVal = CB.getArgOperand(idx); |
| if (!ArgVal) |
| continue; |
| // Here, we handle three cases. |
| // (1) Not having a value means it is dead. (we can replace the value |
| // with undef) |
        // (2) Simplified to undef. The argument violates the noundef
        //     attribute.
        // (3) Simplified to a null pointer where the position is known to be
        //     nonnull. The argument is then poison and violates the noundef
        //     attribute.
| IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx); |
| auto &NoUndefAA = |
| A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE); |
| if (!NoUndefAA.isKnownNoUndef()) |
| continue; |
| auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( |
| *this, IRPosition::value(*ArgVal), DepClassTy::NONE); |
| if (!ValueSimplifyAA.isKnown()) |
| continue; |
| Optional<Value *> SimplifiedVal = |
| ValueSimplifyAA.getAssumedSimplifiedValue(A); |
| if (!SimplifiedVal.hasValue() || |
| isa<UndefValue>(*SimplifiedVal.getValue())) { |
| KnownUBInsts.insert(&I); |
| continue; |
| } |
| if (!ArgVal->getType()->isPointerTy() || |
| !isa<ConstantPointerNull>(*SimplifiedVal.getValue())) |
| continue; |
| auto &NonNullAA = |
| A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE); |
| if (NonNullAA.isKnownNonNull()) |
| KnownUBInsts.insert(&I); |
| } |
| return true; |
| }; |
| |
    auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          // Check if a return instruction always causes UB or not.
| // Note: It is guaranteed that the returned position of the anchor |
| // scope has noundef attribute when this is called. |
| // We also ensure the return position is not "assumed dead" |
| // because the returned value was then potentially simplified to |
| // `undef` in AAReturnedValues without removing the `noundef` |
| // attribute yet. |
| |
          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
| // (1) Returned value is known to be undef. |
| // (2) The value is known to be a null pointer and the returned |
| // position has nonnull attribute (because the returned value is |
| // poison). |
| bool FoundUB = false; |
| if (isa<UndefValue>(V)) { |
| FoundUB = true; |
| } else { |
| if (isa<ConstantPointerNull>(V)) { |
| auto &NonNullAA = A.getAAFor<AANonNull>( |
| *this, IRPosition::returned(*getAnchorScope()), |
| DepClassTy::NONE); |
| if (NonNullAA.isKnownNonNull()) |
| FoundUB = true; |
| } |
| } |
| |
| if (FoundUB) |
| for (ReturnInst *RI : RetInsts) |
| KnownUBInsts.insert(RI); |
| return true; |
| }; |
| |
| A.checkForAllInstructions(InspectMemAccessInstForUB, *this, |
| {Instruction::Load, Instruction::Store, |
| Instruction::AtomicCmpXchg, |
| Instruction::AtomicRMW}, |
| /* CheckBBLivenessOnly */ true); |
| A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br}, |
| /* CheckBBLivenessOnly */ true); |
| A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this); |
| |
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
| if (!getAnchorScope()->getReturnType()->isVoidTy()) { |
| const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope()); |
| if (!A.isAssumedDead(ReturnIRP, this, nullptr)) { |
| auto &RetPosNoUndefAA = |
| A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE); |
| if (RetPosNoUndefAA.isKnownNoUndef()) |
| A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB, |
| *this); |
| } |
| } |
| |
| if (NoUBPrevSize != AssumedNoUBInsts.size() || |
| UBPrevSize != KnownUBInsts.size()) |
| return ChangeStatus::CHANGED; |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| bool isKnownToCauseUB(Instruction *I) const override { |
| return KnownUBInsts.count(I); |
| } |
| |
| bool isAssumedToCauseUB(Instruction *I) const override { |
    // In simple words: if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (this
    // includes those in the KnownUBInsts set). The remaining boilerplate
    // ensures that it is one of the instruction kinds we test for UB.
| |
| switch (I->getOpcode()) { |
| case Instruction::Load: |
| case Instruction::Store: |
| case Instruction::AtomicCmpXchg: |
| case Instruction::AtomicRMW: |
| return !AssumedNoUBInsts.count(I); |
| case Instruction::Br: { |
| auto BrInst = cast<BranchInst>(I); |
| if (BrInst->isUnconditional()) |
| return false; |
| return !AssumedNoUBInsts.count(I); |
    }
| default: |
| return false; |
| } |
| return false; |
| } |
| |
| ChangeStatus manifest(Attributor &A) override { |
| if (KnownUBInsts.empty()) |
| return ChangeStatus::UNCHANGED; |
| for (Instruction *I : KnownUBInsts) |
| A.changeToUnreachableAfterManifest(I); |
| return ChangeStatus::CHANGED; |
| } |
| |
| /// See AbstractAttribute::getAsStr() |
| const std::string getAsStr() const override { |
| return getAssumed() ? "undefined-behavior" : "no-ub"; |
| } |
| |
| /// Note: The correctness of this analysis depends on the fact that the |
| /// following 2 sets will stop changing after some point. |
| /// "Change" here means that their size changes. |
| /// The size of each set is monotonically increasing |
| /// (we only add items to them) and it is upper bounded by the number of |
| /// instructions in the processed function (we can never save more |
| /// elements in either set than this number). Hence, at some point, |
| /// they will stop increasing. |
| /// Consequently, at some point, both sets will have stopped |
| /// changing, effectively making the analysis reach a fixpoint. |
| |
| /// Note: These 2 sets are disjoint and an instruction can be considered |
| /// one of 3 things: |
| /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in |
| /// the KnownUBInsts set. |
| /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior |
| /// has a reason to assume it). |
  /// 3) Assumed to not cause UB. Every other instruction: AAUndefinedBehavior
| /// could not find a reason to assume or prove that it can cause UB, |
| /// hence it assumes it doesn't. We have a set for these instructions |
| /// so that we don't reprocess them in every update. |
| /// Note however that instructions in this set may cause UB. |
| |
| protected: |
| /// A set of all live instructions _known_ to cause UB. |
| SmallPtrSet<Instruction *, 8> KnownUBInsts; |
| |
| private: |
| /// A set of all the (live) instructions that are assumed to _not_ cause UB. |
| SmallPtrSet<Instruction *, 8> AssumedNoUBInsts; |
| |
  // Should be called on updates in which we're processing an instruction
  // \p I that depends on a value \p V; one of the following has to happen:
| // - If the value is assumed, then stop. |
| // - If the value is known but undef, then consider it UB. |
| // - Otherwise, do specific processing with the simplified value. |
| // We return None in the first 2 cases to signify that an appropriate |
| // action was taken and the caller should stop. |
| // Otherwise, we return the simplified value that the caller should |
| // use for specific processing. |
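  // E.g., if the pointer operand of a store is known to simplify to `undef`,
  // the store is inserted into KnownUBInsts and None is returned so that the
  // caller stops processing.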
| Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V, |
| Instruction *I) { |
| const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( |
| *this, IRPosition::value(*V), DepClassTy::REQUIRED); |
| Optional<Value *> SimplifiedV = |
| ValueSimplifyAA.getAssumedSimplifiedValue(A); |
| if (!ValueSimplifyAA.isKnown()) { |
| // Don't depend on assumed values. |
| return llvm::None; |
| } |
| if (!SimplifiedV.hasValue()) { |
| // If it is known (which we tested above) but it doesn't have a value, |
| // then we can assume `undef` and hence the instruction is UB. |
| KnownUBInsts.insert(I); |
| return llvm::None; |
| } |
| Value *Val = SimplifiedV.getValue(); |
| if (isa<UndefValue>(Val)) { |
| KnownUBInsts.insert(I); |
| return llvm::None; |
| } |
| return Val; |
| } |
| }; |
| |
| struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { |
| AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) |
| : AAUndefinedBehaviorImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { |
| STATS_DECL(UndefinedBehaviorInstruction, Instruction, |
| "Number of instructions known to have UB"); |
| BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += |
| KnownUBInsts.size(); |
| } |
| }; |
| |
| /// ------------------------ Will-Return Attributes ---------------------------- |
| |
// Helper function that checks whether a function contains any cycle that we
// do not know to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
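// E.g., a loop with a constant bound such as `for (i = 0; i != 16; ++i)` has
// a known maximum trip count and is bounded, while `while (*p) ++p;` with an
// unknown bound is treated as an unbounded cycle.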
| static bool mayContainUnboundedCycle(Function &F, Attributor &A) { |
| ScalarEvolution *SE = |
| A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); |
| LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); |
  // If either SCEV or LoopInfo is not available for the function, we
  // conservatively treat every cycle as unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the
  // maximal SCCs. To detect whether there is a cycle, it suffices to look at
  // the maximal SCCs.
| if (!SE || !LI) { |
| for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) |
| if (SCCI.hasCycle()) |
| return true; |
| return false; |
| } |
| |
  // If there is irreducible control flow, the function may contain non-loop
  // cycles.
| if (mayContainIrreducibleControl(F, LI)) |
| return true; |
| |
  // Any loop that does not have a known maximum trip count is considered an
  // unbounded cycle.
| for (auto *L : LI->getLoopsInPreorder()) { |
| if (!SE->getSmallConstantMaxTripCount(L)) |
| return true; |
| } |
| return false; |
| } |
| |
| struct AAWillReturnImpl : public AAWillReturn { |
| AAWillReturnImpl(const IRPosition &IRP, Attributor &A) |
| : AAWillReturn(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AAWillReturn::initialize(A); |
| |
| if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) { |
| indicateOptimisticFixpoint(); |
| return; |
| } |
| } |
| |
| /// Check for `mustprogress` and `readonly` as they imply `willreturn`. |
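  /// The reasoning, roughly: a `mustprogress` function has to make forward
  /// progress, and a `readonly` function cannot make observable progress
  /// through stores or synchronization, so the only way to satisfy both is to
  /// eventually return.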
| bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) { |
| // Check for `mustprogress` in the scope and the associated function which |
| // might be different if this is a call site. |
| if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) && |
| (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress())) |
| return false; |
| |
| const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), |
| DepClassTy::NONE); |
| if (!MemAA.isAssumedReadOnly()) |
| return false; |
| if (KnownOnly && !MemAA.isKnownReadOnly()) |
| return false; |
| if (!MemAA.isKnownReadOnly()) |
| A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL); |
| |
| return true; |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) |
| return ChangeStatus::UNCHANGED; |
| |
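    // A call keeps us willreturn if the callee is known willreturn, or is
    // assumed willreturn and additionally assumed norecurse; the norecurse
    // requirement rules out (mutual) recursion that assumed-only information
    // cannot exclude.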
| auto CheckForWillReturn = [&](Instruction &I) { |
| IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); |
| const auto &WillReturnAA = |
| A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED); |
| if (WillReturnAA.isKnownWillReturn()) |
| return true; |
| if (!WillReturnAA.isAssumedWillReturn()) |
| return false; |
| const auto &NoRecurseAA = |
| A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED); |
| return NoRecurseAA.isAssumedNoRecurse(); |
| }; |
| |
| if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) |
| return indicatePessimisticFixpoint(); |
| |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| /// See AbstractAttribute::getAsStr() |
| const std::string getAsStr() const override { |
| return getAssumed() ? "willreturn" : "may-noreturn"; |
| } |
| }; |
| |
| struct AAWillReturnFunction final : AAWillReturnImpl { |
| AAWillReturnFunction(const IRPosition &IRP, Attributor &A) |
| : AAWillReturnImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AAWillReturnImpl::initialize(A); |
| |
| Function *F = getAnchorScope(); |
| if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } |
| }; |
| |
/// WillReturn attribute deduction for a call site.
| struct AAWillReturnCallSite final : AAWillReturnImpl { |
| AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) |
| : AAWillReturnImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AAWillReturnImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
| if (!F || !A.isFunctionIPOAmendable(*F)) |
| indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) |
| return ChangeStatus::UNCHANGED; |
| |
| // TODO: Once we have call site specific value information we can provide |
| // call site specific liveness information and then it makes |
| // sense to specialize attributes for call sites arguments instead of |
| // redirecting requests to the callee argument. |
| Function *F = getAssociatedFunction(); |
| const IRPosition &FnPos = IRPosition::function(*F); |
| auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); |
| return clampStateAndIndicateChange(getState(), FnAA.getState()); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } |
| }; |
| |
| /// -------------------AAReachability Attribute-------------------------- |
| |
| struct AAReachabilityImpl : AAReachability { |
| AAReachabilityImpl(const IRPosition &IRP, Attributor &A) |
| : AAReachability(IRP, A) {} |
| |
| const std::string getAsStr() const override { |
| // TODO: Return the number of reachable queries. |
| return "reachable"; |
| } |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| return indicatePessimisticFixpoint(); |
| } |
| }; |
| |
| struct AAReachabilityFunction final : public AAReachabilityImpl { |
| AAReachabilityFunction(const IRPosition &IRP, Attributor &A) |
| : AAReachabilityImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } |
| }; |
| |
| /// ------------------------ NoAlias Argument Attribute ------------------------ |
| |
| struct AANoAliasImpl : AANoAlias { |
| AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { |
| assert(getAssociatedType()->isPointerTy() && |
| "Noalias is a pointer attribute"); |
| } |
| |
| const std::string getAsStr() const override { |
| return getAssumed() ? "noalias" : "may-alias"; |
| } |
| }; |
| |
| /// NoAlias attribute for a floating value. |
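///
/// E.g., a single-use bitcast of an alloca is stripped during initialization;
/// the underlying alloca then lets us conclude noalias right away.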
| struct AANoAliasFloating final : AANoAliasImpl { |
| AANoAliasFloating(const IRPosition &IRP, Attributor &A) |
| : AANoAliasImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoAliasImpl::initialize(A); |
| Value *Val = &getAssociatedValue(); |
| do { |
| CastInst *CI = dyn_cast<CastInst>(Val); |
| if (!CI) |
| break; |
| Value *Base = CI->getOperand(0); |
| if (!Base->hasOneUse()) |
| break; |
| Val = Base; |
| } while (true); |
| |
| if (!Val->getType()->isPointerTy()) { |
| indicatePessimisticFixpoint(); |
| return; |
| } |
| |
| if (isa<AllocaInst>(Val)) |
| indicateOptimisticFixpoint(); |
| else if (isa<ConstantPointerNull>(Val) && |
| !NullPointerIsDefined(getAnchorScope(), |
| Val->getType()->getPointerAddressSpace())) |
| indicateOptimisticFixpoint(); |
| else if (Val != &getAssociatedValue()) { |
| const auto &ValNoAliasAA = A.getAAFor<AANoAlias>( |
| *this, IRPosition::value(*Val), DepClassTy::OPTIONAL); |
| if (ValNoAliasAA.isKnownNoAlias()) |
| indicateOptimisticFixpoint(); |
| } |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // TODO: Implement this. |
| return indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { |
| STATS_DECLTRACK_FLOATING_ATTR(noalias) |
| } |
| }; |
| |
| /// NoAlias attribute for an argument. |
| struct AANoAliasArgument final |
| : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { |
| using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; |
| AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| Base::initialize(A); |
| // See callsite argument attribute and callee argument attribute. |
| if (hasAttr({Attribute::ByVal})) |
| indicateOptimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::update(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // We have to make sure no-alias on the argument does not break |
| // synchronization when this is a callback argument, see also [1] below. |
| // If synchronization cannot be affected, we delegate to the base updateImpl |
| // function, otherwise we give up for now. |
| |
| // If the function is no-sync, no-alias cannot break synchronization. |
| const auto &NoSyncAA = |
| A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), |
| DepClassTy::OPTIONAL); |
| if (NoSyncAA.isAssumedNoSync()) |
| return Base::updateImpl(A); |
| |
| // If the argument is read-only, no-alias cannot break synchronization. |
| const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( |
| *this, getIRPosition(), DepClassTy::OPTIONAL); |
| if (MemBehaviorAA.isAssumedReadOnly()) |
| return Base::updateImpl(A); |
| |
| // If the argument is never passed through callbacks, no-alias cannot break |
| // synchronization. |
| bool AllCallSitesKnown; |
| if (A.checkForAllCallSites( |
| [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, |
| true, AllCallSitesKnown)) |
| return Base::updateImpl(A); |
| |
| // TODO: add no-alias but make sure it doesn't break synchronization by |
| // introducing fake uses. See: |
| // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, |
| // International Workshop on OpenMP 2018, |
| // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf |
| |
| return indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } |
| }; |
| |
| struct AANoAliasCallSiteArgument final : AANoAliasImpl { |
| AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) |
| : AANoAliasImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| // See callsite argument attribute and callee argument attribute. |
| const auto &CB = cast<CallBase>(getAnchorValue()); |
| if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias)) |
| indicateOptimisticFixpoint(); |
| Value &Val = getAssociatedValue(); |
| if (isa<ConstantPointerNull>(Val) && |
| !NullPointerIsDefined(getAnchorScope(), |
| Val.getType()->getPointerAddressSpace())) |
| indicateOptimisticFixpoint(); |
| } |
| |
  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
| bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, |
| const AAMemoryBehavior &MemBehaviorAA, |
| const CallBase &CB, unsigned OtherArgNo) { |
| // We do not need to worry about aliasing with the underlying IRP. |
| if (this->getCalleeArgNo() == (int)OtherArgNo) |
| return false; |
| |
| // If it is not a pointer or pointer vector we do not alias. |
| const Value *ArgOp = CB.getArgOperand(OtherArgNo); |
| if (!ArgOp->getType()->isPtrOrPtrVectorTy()) |
| return false; |
| |
| auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( |
| *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE); |
| |
| // If the argument is readnone, there is no read-write aliasing. |
| if (CBArgMemBehaviorAA.isAssumedReadNone()) { |
| A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); |
| return false; |
| } |
| |
| // If the argument is readonly and the underlying value is readonly, there |
| // is no read-write aliasing. |
| bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); |
| if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { |
| A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); |
| A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); |
| return false; |
| } |
| |
| // We have to utilize actual alias analysis queries so we need the object. |
| if (!AAR) |
| AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); |
| |
| // Try to rule it out at the call site. |
| bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); |
| LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " |
| "callsite arguments: " |
| << getAssociatedValue() << " " << *ArgOp << " => " |
                      << (IsAliasing ? "" : "no-") << "alias\n");
| |
| return IsAliasing; |
| } |
| |
| bool |
| isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, |
| const AAMemoryBehavior &MemBehaviorAA, |
| const AANoAlias &NoAliasAA) { |
| // We can deduce "noalias" if the following conditions hold. |
| // (i) Associated value is assumed to be noalias in the definition. |
| // (ii) Associated value is assumed to be no-capture in all the uses |
| // possibly executed before this callsite. |
| // (iii) There is no other pointer argument which could alias with the |
| // value. |
| |
| bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); |
| if (!AssociatedValueIsNoAliasAtDef) { |
| LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() |
| << " is not no-alias at the definition\n"); |
| return false; |
| } |
| |
| A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); |
| |
| const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); |
| const Function *ScopeFn = VIRP.getAnchorScope(); |
| auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); |
| // Check whether the value is captured in the scope using AANoCapture. |
| // Look at CFG and check only uses possibly executed before this |
| // callsite. |
| auto UsePred = [&](const Use &U, bool &Follow) -> bool { |
| Instruction *UserI = cast<Instruction>(U.getUser()); |
| |
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI, we allow the use.
| // TODO: We should inspect the operands and allow those that cannot alias |
| // with the value. |
| if (UserI == getCtxI() && UserI->getNumOperands() == 1) |
| return true; |
| |
| if (ScopeFn) { |
| const auto &ReachabilityAA = A.getAAFor<AAReachability>( |
| *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL); |
| |
| if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) |
| return true; |
| |
| if (auto *CB = dyn_cast<CallBase>(UserI)) { |
| if (CB->isArgOperand(&U)) { |
| |
| unsigned ArgNo = CB->getArgOperandNo(&U); |
| |
| const auto &NoCaptureAA = A.getAAFor<AANoCapture>( |
| *this, IRPosition::callsite_argument(*CB, ArgNo), |
| DepClassTy::OPTIONAL); |
| |
| if (NoCaptureAA.isAssumedNoCapture()) |
| return true; |
| } |
| } |
| } |
| |
| // For cases which can potentially have more users |
| if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || |
| isa<SelectInst>(U)) { |
| Follow = true; |
| return true; |
| } |
| |
| LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); |
| return false; |
| }; |
| |
| if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { |
| if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { |
| LLVM_DEBUG( |
| dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() |
| << " cannot be noalias as it is potentially captured\n"); |
| return false; |
| } |
| } |
| A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); |
| |
| // Check there is no other pointer argument which could alias with the |
| // value passed at this call site. |
| // TODO: AbstractCallSite |
| const auto &CB = cast<CallBase>(getAnchorValue()); |
| for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); |
| OtherArgNo++) |
| if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) |
| return false; |
| |
| return true; |
| } |
| |
| /// See AbstractAttribute::updateImpl(...). |
| ChangeStatus updateImpl(Attributor &A) override { |
| // If the argument is readnone we are done as there are no accesses via the |
| // argument. |
| auto &MemBehaviorAA = |
| A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); |
| if (MemBehaviorAA.isAssumedReadNone()) { |
| A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); |
| const auto &NoAliasAA = |
| A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); |
| |
| AAResults *AAR = nullptr; |
| if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, |
| NoAliasAA)) { |
| LLVM_DEBUG( |
| dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); |
| return ChangeStatus::UNCHANGED; |
| } |
| |
| return indicatePessimisticFixpoint(); |
| } |
| |
| /// See AbstractAttribute::trackStatistics() |
| void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } |
| }; |
| |
| /// NoAlias attribute for function return value. |
| struct AANoAliasReturned final : AANoAliasImpl { |
| AANoAliasReturned(const IRPosition &IRP, Attributor &A) |
| : AANoAliasImpl(IRP, A) {} |
| |
| /// See AbstractAttribute::initialize(...). |
| void initialize(Attributor &A) override { |
| AANoAliasImpl::initialize(A); |
| Function *F = getAssociatedFunction(); |
|
|