//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
using namespace llvm;
#define DEBUG_TYPE "attributor"
static cl::opt<bool> ManifestInternal(
"attributor-manifest-internal", cl::Hidden,
cl::desc("Manifest Attributor internal string attributes."),
cl::init(false));
static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
cl::Hidden);
template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
static cl::opt<unsigned, true> MaxPotentialValues(
"attributor-max-potential-values", cl::Hidden,
cl::desc("Maximum number of potential values to be "
"tracked for each position."),
cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
cl::init(7));
STATISTIC(NumAAs, "Number of abstract attributes created");
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
// void trackStatistics() const override {
// STATS_DECLTRACK_ARG_ATTR(returned)
// }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
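// e.g. (illustrative, `MyAttr` is a made-up name):
// STATS_DECL(MyAttr, Function, "Number of functions marked 'myattr'")
// ...
// STATS_TRACK(MyAttr, Function)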
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG) \
STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG) \
{ \
STATS_DECL(NAME, TYPE, MSG) \
STATS_TRACK(NAME, TYPE) \
}
#define STATS_DECLTRACK_ARG_ATTR(NAME) \
STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
STATS_DECLTRACK(NAME, CSArguments, \
BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME) \
STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME) \
STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
STATS_DECLTRACK(NAME, FunctionReturn, \
BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
STATS_DECLTRACK(NAME, CSReturn, \
BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
STATS_DECLTRACK(NAME, Floating, \
("Number of floating values known to be '" #NAME "'"))
// Specialization of operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
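// This allows, e.g., `LLVM_DEBUG(dbgs() << NoUnwindAA << "\n")` (with
// NoUnwindAA being any of the classes below) without an ambiguous overload.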
namespace llvm {
#define PIPE_OPERATOR(CLASS) \
raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
return OS << static_cast<const AbstractAttribute &>(AA); \
}
PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)
#undef PIPE_OPERATOR
template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
const DerefState &R) {
ChangeStatus CS0 =
clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
return CS0 | CS1;
}
} // namespace llvm
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
bool AllowVolatile) {
if (!AllowVolatile && I->isVolatile())
return nullptr;
if (auto *LI = dyn_cast<LoadInst>(I)) {
return LI->getPointerOperand();
}
if (auto *SI = dyn_cast<StoreInst>(I)) {
return SI->getPointerOperand();
}
if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
return CXI->getPointerOperand();
}
if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
return RMWI->getPointerOperand();
}
return nullptr;
}
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
/// already exist.
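///
/// For example (illustrative), with PtrElemTy = {i32, i32} and Offset = 4,
/// this emits a GEP with indices (0, 1) into the struct; an offset that does
/// not land on a field boundary is handled by a trailing byte-wise i8 GEP.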
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
int64_t Offset, IRBuilder<NoFolder> &IRB,
const DataLayout &DL) {
assert(Offset >= 0 && "Negative offset not supported yet!");
LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
<< "-bytes as " << *ResTy << "\n");
if (Offset) {
Type *Ty = PtrElemTy;
APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
SmallVector<Value *, 4> ValIndices;
std::string GEPName = Ptr->getName().str();
for (const APInt &Index : IntIndices) {
ValIndices.push_back(IRB.getInt(Index));
GEPName += "." + std::to_string(Index.getZExtValue());
}
// Create a GEP for the indices collected above.
Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
// If an offset is left we use byte-wise adjustment.
if (IntOffset != 0) {
Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
GEPName + ".b" + Twine(IntOffset.getZExtValue()));
}
}
// Ensure the result has the requested type.
Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
return Ptr;
}
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
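///
/// For example, for `%v = select i1 %c, i32 %a, i32 %b` the traversal visits
/// both %a and %b, unless the condition %c can be simplified to a constant,
/// in which case only the chosen operand is followed.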
template <typename StateTy>
static bool genericValueTraversal(
Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
StateTy &State,
function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
VisitValueCB,
const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
function_ref<Value *(Value *)> StripCB = nullptr) {
const AAIsDead *LivenessAA = nullptr;
if (IRP.getAnchorScope())
LivenessAA = &A.getAAFor<AAIsDead>(
QueryingAA,
IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
DepClassTy::NONE);
bool AnyDead = false;
Value *InitialV = &IRP.getAssociatedValue();
using Item = std::pair<Value *, const Instruction *>;
SmallSet<Item, 16> Visited;
SmallVector<Item, 16> Worklist;
Worklist.push_back({InitialV, CtxI});
int Iteration = 0;
do {
Item I = Worklist.pop_back_val();
Value *V = I.first;
CtxI = I.second;
if (StripCB)
V = StripCB(V);
// Check if we should process the current value. To prevent endless
// recursion, keep a record of the values we followed!
if (!Visited.insert(I).second)
continue;
// Make sure we limit the compile time for complex expressions.
if (Iteration++ >= MaxValues)
return false;
// Explicitly look through calls with a "returned" attribute if we do
// not have a pointer, as stripPointerCasts only works on pointers.
Value *NewV = nullptr;
if (V->getType()->isPointerTy()) {
NewV = V->stripPointerCasts();
} else {
auto *CB = dyn_cast<CallBase>(V);
if (CB && CB->getCalledFunction()) {
for (Argument &Arg : CB->getCalledFunction()->args())
if (Arg.hasReturnedAttr()) {
NewV = CB->getArgOperand(Arg.getArgNo());
break;
}
}
}
if (NewV && NewV != V) {
Worklist.push_back({NewV, CtxI});
continue;
}
// Look through select instructions, visit assumed potential values.
if (auto *SI = dyn_cast<SelectInst>(V)) {
bool UsedAssumedInformation = false;
Optional<Constant *> C = A.getAssumedConstant(
*SI->getCondition(), QueryingAA, UsedAssumedInformation);
bool NoValueYet = !C.hasValue();
if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
continue;
if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
if (CI->isZero())
Worklist.push_back({SI->getFalseValue(), CtxI});
else
Worklist.push_back({SI->getTrueValue(), CtxI});
continue;
}
// We could not simplify the condition, assume both values.
Worklist.push_back({SI->getTrueValue(), CtxI});
Worklist.push_back({SI->getFalseValue(), CtxI});
continue;
}
// Look through phi nodes, visit all live operands.
if (auto *PHI = dyn_cast<PHINode>(V)) {
assert(LivenessAA &&
"Expected liveness in the presence of instructions!");
for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
bool UsedAssumedInformation = false;
if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
LivenessAA, UsedAssumedInformation,
/* CheckBBLivenessOnly */ true)) {
AnyDead = true;
continue;
}
Worklist.push_back(
{PHI->getIncomingValue(u), IncomingBB->getTerminator()});
}
continue;
}
if (UseValueSimplify && !isa<Constant>(V)) {
bool UsedAssumedInformation = false;
Optional<Value *> SimpleV =
A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
if (!SimpleV.hasValue())
continue;
if (!SimpleV.getValue())
return false;
Value *NewV = SimpleV.getValue();
if (NewV != V) {
Worklist.push_back({NewV, CtxI});
continue;
}
}
// Once a leaf is reached we inform the user through the callback.
if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
return false;
} while (!Worklist.empty());
// If we actually used liveness information, we have to record a dependence.
if (AnyDead)
A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
// All values have been visited.
return true;
}
bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
SmallVectorImpl<Value *> &Objects,
const AbstractAttribute &QueryingAA,
const Instruction *CtxI) {
auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
SmallPtrSet<Value *, 8> SeenObjects;
auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
SmallVectorImpl<Value *> &Objects,
bool) -> bool {
if (SeenObjects.insert(&Val).second)
Objects.push_back(&Val);
return true;
};
if (!genericValueTraversal<decltype(Objects)>(
A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
true, 32, StripCB))
return false;
return true;
}
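/// Strip \p Val and accumulate a minimal constant offset into \p Offset. In
/// contrast to the plain IR helper, non-constant indices are bounded via
/// AAValueConstantRange, using the signed minimum of the known (or, if \p
/// UseAssumed is set, assumed) range as a conservative contribution.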
const Value *stripAndAccumulateMinimalOffsets(
Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
bool UseAssumed = false) {
auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
const IRPosition &Pos = IRPosition::value(V);
// Only track dependence if we are going to use the assumed info.
const AAValueConstantRange &ValueConstantRangeAA =
A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
UseAssumed ? DepClassTy::OPTIONAL
: DepClassTy::NONE);
ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
: ValueConstantRangeAA.getKnown();
// We can only use the lower part of the range because the upper part can
// be higher than what the value can really be.
ROffset = Range.getSignedMin();
return true;
};
return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
/* AllowInvariant */ false,
AttributorAnalysis);
}
static const Value *getMinimalBaseOfAccsesPointerOperand(
Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
if (!Ptr)
return nullptr;
APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
const Value *Base = stripAndAccumulateMinimalOffsets(
A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
BytesOffset = OffsetAPInt.getSExtValue();
return Base;
}
static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
const DataLayout &DL,
bool AllowNonInbounds = false) {
const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
if (!Ptr)
return nullptr;
return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
AllowNonInbounds);
}
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
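/// For example, if every returned value of a function is known `nonnull`, the
/// clamped state for the returned position is `nonnull` as well; a single
/// returned value with an invalid state pessimizes the result.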
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
Attributor &A, const AAType &QueryingAA, StateType &S,
const IRPosition::CallBaseContext *CBContext = nullptr) {
LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
<< QueryingAA << " into " << S << "\n");
assert((QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_RETURNED ||
QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_CALL_SITE_RETURNED) &&
"Can only clamp returned value states for a function returned or call "
"site returned position!");
// Use an optional state as there might not be any return values and we want
// to join (IntegerState::operator&) the states of all there are.
Optional<StateType> T;
// Callback for each possibly returned value.
auto CheckReturnValue = [&](Value &RV) -> bool {
const IRPosition &RVPos = IRPosition::value(RV, CBContext);
const AAType &AA =
A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
<< " @ " << RVPos << "\n");
const StateType &AAS = AA.getState();
if (T.hasValue())
*T &= AAS;
else
T = AAS;
LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
<< "\n");
return T->isValidState();
};
if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
S.indicatePessimisticFixpoint();
else if (T.hasValue())
S ^= *T;
}
namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
typename StateType = typename BaseType::StateType,
bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
: BaseType(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
StateType S(StateType::getBestState(this->getState()));
clampReturnedValueStates<AAType, StateType>(
A, *this, S,
PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
// TODO: If we know we visited all returned values, thus none are assumed
// dead, we can take the known information from the state T.
return clampStateAndIndicateChange<StateType>(this->getState(), S);
}
};
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
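/// For example, an argument is only clamped to `align 16` if it is known (or
/// assumed) to be at least 16-byte aligned at every known call site.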
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
StateType &S) {
LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
<< QueryingAA << " into " << S << "\n");
assert(QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_ARGUMENT &&
"Can only clamp call site argument states for an argument position!");
// Use an optional state as there might not be any call sites and we want
// to join (IntegerState::operator&) the states of all there are.
Optional<StateType> T;
// The argument number which is also the call site argument number.
unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
auto CallSiteCheck = [&](AbstractCallSite ACS) {
const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
// Check if a corresponding argument was found or if it is not associated
// (which can happen for callback calls).
if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
return false;
const AAType &AA =
A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
<< " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
const StateType &AAS = AA.getState();
if (T.hasValue())
*T &= AAS;
else
T = AAS;
LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
<< "\n");
return T->isValidState();
};
bool AllCallSitesKnown;
if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
AllCallSitesKnown))
S.indicatePessimisticFixpoint();
else if (T.hasValue())
S ^= *T;
}
/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
BaseType &QueryingAttribute,
IRPosition &Pos, StateType &State) {
assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
"Expected an 'argument' position !");
const CallBase *CBContext = Pos.getCallBaseContext();
if (!CBContext)
return false;
int ArgNo = Pos.getCallSiteArgNo();
assert(ArgNo >= 0 && "Invalid Arg No!");
const auto &AA = A.getAAFor<AAType>(
QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
DepClassTy::REQUIRED);
const StateType &CBArgumentState =
static_cast<const StateType &>(AA.getState());
LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
<< "Position:" << Pos << "CB Arg state:" << CBArgumentState
<< "\n");
// NOTE: If we want to do call site grouping it should happen here.
State ^= CBArgumentState;
return true;
}
/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
typename StateType = typename AAType::StateType,
bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
: BaseType(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
StateType S = StateType::getBestState(this->getState());
if (BridgeCallBaseContext) {
bool Success =
getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
A, *this, this->getIRPosition(), S);
if (Success)
return clampStateAndIndicateChange<StateType>(this->getState(), S);
}
clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
// TODO: If we know we visited all incoming values, thus none are assumed
// dead, we can take the known information from the state T.
return clampStateAndIndicateChange<StateType>(this->getState(), S);
}
};
/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
typename StateType = typename BaseType::StateType,
bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
: BaseType(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
assert(this->getIRPosition().getPositionKind() ==
IRPosition::IRP_CALL_SITE_RETURNED &&
"Can only wrap function returned positions for call site returned "
"positions!");
auto &S = this->getState();
const Function *AssociatedFunction =
this->getIRPosition().getAssociatedFunction();
if (!AssociatedFunction)
return S.indicatePessimisticFixpoint();
CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
if (IntroduceCallBaseContext)
LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
<< CBContext << "\n");
IRPosition FnPos = IRPosition::returned(
*AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(S, AA.getState());
}
};
} // namespace
/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
MustBeExecutedContextExplorer &Explorer,
const Instruction *CtxI,
SetVector<const Use *> &Uses,
StateType &State) {
auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
for (unsigned u = 0; u < Uses.size(); ++u) {
const Use *U = Uses[u];
if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
if (Found && AA.followUseInMBEC(A, U, UserI, State))
for (const Use &Us : UserI->uses())
Uses.insert(&Us);
}
}
}
/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
/// StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
Instruction &CtxI) {
// Container for (transitive) uses of the associated value.
SetVector<const Use *> Uses;
for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
Uses.insert(&U);
MustBeExecutedContextExplorer &Explorer =
A.getInfoCache().getMustBeExecutedContextExplorer();
followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
if (S.isAtFixpoint())
return;
SmallVector<const BranchInst *, 4> BrInsts;
auto Pred = [&](const Instruction *I) {
if (const BranchInst *Br = dyn_cast<BranchInst>(I))
if (Br->isConditional())
BrInsts.push_back(Br);
return true;
};
// Here, accumulate conditional branch instructions in the context. We
// explore the child paths and collect the known states. The disjunction of
// those states can be merged to its own state. Let ParentState_i be a state
// to indicate the known information for an i-th branch instruction in the
// context. ChildStates are created for its successors respectively.
//
// ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
// ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
// ...
// ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
//
// Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
//
// FIXME: Currently, recursive branches are not handled. For example, we
// can't deduce that ptr must be dereferenced in the function below.
//
// void f(int a, int b, int *ptr) {
// if (a)
// if (b) {
// *ptr = 0;
// } else {
// *ptr = 1;
// }
// else {
// if (b) {
// *ptr = 0;
// } else {
// *ptr = 1;
// }
// }
// }
Explorer.checkForAllContext(&CtxI, Pred);
for (const BranchInst *Br : BrInsts) {
StateType ParentState;
// The known state of the parent state is a conjunction of children's
// known states so it is initialized with a best state.
ParentState.indicateOptimisticFixpoint();
for (const BasicBlock *BB : Br->successors()) {
StateType ChildState;
size_t BeforeSize = Uses.size();
followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
// Erase uses which only appear in the child.
for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
It = Uses.erase(It);
ParentState &= ChildState;
}
// Use only known state.
S += ParentState;
}
}
/// ------------------------ PointerInfo ---------------------------------------
namespace llvm {
namespace AA {
namespace PointerInfo {
/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;
struct State;
} // namespace PointerInfo
} // namespace AA
/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
using Access = AAPointerInfo::Access;
static inline Access getEmptyKey();
static inline Access getTombstoneKey();
static unsigned getHashValue(const Access &A);
static bool isEqual(const Access &LHS, const Access &RHS);
};
/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
: DenseMapInfo<std::pair<int64_t, int64_t>> {};
/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
using Base = DenseMapInfo<Instruction *>;
using Access = AAPointerInfo::Access;
static inline Access getEmptyKey();
static inline Access getTombstoneKey();
static unsigned getHashValue(const Access &A);
static bool isEqual(const Access &LHS, const Access &RHS);
};
} // namespace llvm
/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
using BaseTy = std::pair<int64_t, int64_t>;
OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
int64_t getOffset() const { return first; }
int64_t getSize() const { return second; }
static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
/// Return true if this offset and size pair might describe an address that
/// overlaps with \p OAS.
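/// For example, {Offset = 0, Size = 8} may overlap {Offset = 4, Size = 4}
/// but not {Offset = 8, Size = 4}; any Unknown component conservatively
/// results in "may overlap".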
bool mayOverlap(const OffsetAndSize &OAS) const {
// Any unknown value and we are giving up -> overlap.
if (OAS.getOffset() == OffsetAndSize::Unknown ||
OAS.getSize() == OffsetAndSize::Unknown ||
getOffset() == OffsetAndSize::Unknown ||
getSize() == OffsetAndSize::Unknown)
return true;
// Check if the half-open intervals [offset, offset + size) overlap.
return OAS.getOffset() + OAS.getSize() > getOffset() &&
OAS.getOffset() < getOffset() + getSize();
}
/// Constant used to represent unknown offsets or sizes.
static constexpr int64_t Unknown = 1 << 31;
};
/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
const llvm::AccessAsInstructionInfo::Access &A) {
return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
const llvm::AccessAsInstructionInfo::Access &LHS,
const llvm::AccessAsInstructionInfo::Access &RHS) {
return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
nullptr);
}
unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
return detail::combineHashValue(
DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
(A.isWrittenValueYetUndetermined()
? ~0
: DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
A.getKind();
}
bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
return LHS == RHS;
}
///}
/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {
/// Return the best possible representable state.
static State getBestState(const State &SIS) { return State(); }
/// Return the worst possible representable state.
static State getWorstState(const State &SIS) {
State R;
R.indicatePessimisticFixpoint();
return R;
}
State() {}
State(const State &SIS) : AccessBins(SIS.AccessBins) {}
State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
const State &getAssumed() const { return *this; }
/// See AbstractState::isValidState().
bool isValidState() const override { return BS.isValidState(); }
/// See AbstractState::isAtFixpoint().
bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
/// See AbstractState::indicateOptimisticFixpoint().
ChangeStatus indicateOptimisticFixpoint() override {
BS.indicateOptimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
/// See AbstractState::indicatePessimisticFixpoint().
ChangeStatus indicatePessimisticFixpoint() override {
BS.indicatePessimisticFixpoint();
return ChangeStatus::CHANGED;
}
State &operator=(const State &R) {
if (this == &R)
return *this;
BS = R.BS;
AccessBins = R.AccessBins;
return *this;
}
State &operator=(State &&R) {
if (this == &R)
return *this;
std::swap(BS, R.BS);
std::swap(AccessBins, R.AccessBins);
return *this;
}
bool operator==(const State &R) const {
if (BS != R.BS)
return false;
if (AccessBins.size() != R.AccessBins.size())
return false;
auto It = begin(), RIt = R.begin(), E = end();
while (It != E) {
if (It->getFirst() != RIt->getFirst())
return false;
auto &Accs = It->getSecond();
auto &RAccs = RIt->getSecond();
if (Accs.size() != RAccs.size())
return false;
auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
while (AccIt != AccE) {
if (*AccIt != *RAccIt)
return false;
++AccIt;
++RAccIt;
}
++It;
++RIt;
}
return true;
}
bool operator!=(const State &R) const { return !(*this == R); }
/// We store accesses in a set with the instruction as key.
using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
/// We store all accesses in bins denoted by their offset and size.
using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
protected:
/// The bins with all the accesses for the associated pointer.
DenseMap<OffsetAndSize, Accesses> AccessBins;
/// Add a new access to the state at offset \p Offset and with size \p Size.
/// The access is associated with \p I, writes \p Content (if anything), and
/// is of kind \p Kind.
/// \returns CHANGED if the state changed, UNCHANGED otherwise.
ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
Optional<Value *> Content,
AAPointerInfo::AccessKind Kind, Type *Ty,
Instruction *RemoteI = nullptr,
Accesses *BinPtr = nullptr) {
OffsetAndSize Key{Offset, Size};
Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
// Check if we have an access for this instruction in this bin; if not,
// simply add it.
auto It = Bin.find(Acc);
if (It == Bin.end()) {
Bin.insert(Acc);
return ChangeStatus::CHANGED;
}
// If the existing access is the same as the new one, nothing changed.
AAPointerInfo::Access Before = *It;
// The new one will be combined with the existing one.
*It &= Acc;
return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
}
/// See AAPointerInfo::forallInterferingAccesses.
bool forallInterferingAccesses(
Instruction &I,
function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
if (!isValidState())
return false;
// First find the offset and size of I.
OffsetAndSize OAS(-1, -1);
for (auto &It : AccessBins) {
for (auto &Access : It.getSecond()) {
if (Access.getRemoteInst() == &I) {
OAS = It.getFirst();
break;
}
}
if (OAS.getSize() != -1)
break;
}
if (OAS.getSize() == -1)
return true;
// Now that we have an offset and size, find all overlapping ones and use
// the callback on the accesses.
for (auto &It : AccessBins) {
OffsetAndSize ItOAS = It.getFirst();
if (!OAS.mayOverlap(ItOAS))
continue;
for (auto &Access : It.getSecond())
if (!CB(Access, OAS == ItOAS))
return false;
}
return true;
}
private:
/// State to track fixpoint and validity.
BooleanState BS;
};
namespace {
struct AAPointerInfoImpl
: public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return std::string("PointerInfo ") +
(isValidState() ? (std::string("#") +
std::to_string(AccessBins.size()) + " bins")
: "<invalid>");
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override {
return AAPointerInfo::manifest(A);
}
bool forallInterferingAccesses(
LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
const override {
return State::forallInterferingAccesses(LI, CB);
}
bool forallInterferingAccesses(
StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
const override {
return State::forallInterferingAccesses(SI, CB);
}
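/// Translate the accesses known for a callee argument (\p CalleeAA) into the
/// context of the call site \p CB, shifting each access bin by
/// \p CallArgOffset (or collapsing it into the unknown bin if the offset is
/// unknown).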
ChangeStatus translateAndAddCalleeState(Attributor &A,
const AAPointerInfo &CalleeAA,
int64_t CallArgOffset, CallBase &CB) {
using namespace AA::PointerInfo;
if (!CalleeAA.getState().isValidState() || !isValidState())
return indicatePessimisticFixpoint();
const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
// Combine the accesses bin by bin.
ChangeStatus Changed = ChangeStatus::UNCHANGED;
for (auto &It : CalleeImplAA.getState()) {
OffsetAndSize OAS = OffsetAndSize::getUnknown();
if (CallArgOffset != OffsetAndSize::Unknown)
OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
It.first.getSize());
Accesses &Bin = AccessBins[OAS];
for (const AAPointerInfo::Access &RAcc : It.second) {
if (IsByval && !RAcc.isRead())
continue;
bool UsedAssumedInformation = false;
Optional<Value *> Content = A.translateArgumentToCallSiteContent(
RAcc.getContent(), CB, *this, UsedAssumedInformation);
AccessKind AK =
AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
: AccessKind::AK_READ_WRITE));
Changed =
Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
RAcc.getType(), RAcc.getRemoteInst(), &Bin);
}
}
return Changed;
}
/// Statistic tracking for all AAPointerInfo implementations.
/// See AbstractAttribute::trackStatistics().
void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};
struct AAPointerInfoFloating : public AAPointerInfoImpl {
using AccessKind = AAPointerInfo::AccessKind;
AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
: AAPointerInfoImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
/// Deal with an access and signal if it was handled successfully.
bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
Optional<Value *> Content, AccessKind Kind, int64_t Offset,
ChangeStatus &Changed, Type *Ty,
int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
using namespace AA::PointerInfo;
// No need to find a size if one is given or the offset is unknown.
if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
Ty) {
const DataLayout &DL = A.getDataLayout();
TypeSize AccessSize = DL.getTypeStoreSize(Ty);
if (!AccessSize.isScalable())
Size = AccessSize.getFixedSize();
}
Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
return true;
};
/// Helper struct, will support ranges eventually.
struct OffsetInfo {
int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
};
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
using namespace AA::PointerInfo;
State S = getState();
ChangeStatus Changed = ChangeStatus::UNCHANGED;
Value &AssociatedValue = getAssociatedValue();
const DataLayout &DL = A.getDataLayout();
DenseMap<Value *, OffsetInfo> OffsetInfoMap;
OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
bool &Follow) {
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
UsrOI = PtrOI;
Follow = true;
return true;
};
auto UsePred = [&](const Use &U, bool &Follow) -> bool {
Value *CurPtr = U.get();
User *Usr = U.getUser();
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
<< *Usr << "\n");
OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
if (CE->isCast())
return HandlePassthroughUser(Usr, PtrOI, Follow);
if (CE->isCompare())
return true;
if (!CE->isGEPWithNoNotionalOverIndexing()) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
<< "\n");
return false;
}
}
if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
UsrOI = PtrOI;
// TODO: Use range information.
if (PtrOI.Offset == OffsetAndSize::Unknown ||
!GEP->hasAllConstantIndices()) {
UsrOI.Offset = OffsetAndSize::Unknown;
Follow = true;
return true;
}
SmallVector<Value *, 8> Indices;
for (Use &Idx : GEP->indices()) {
if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
Indices.push_back(CIdx);
continue;
}
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
<< " : " << *Idx << "\n");
return false;
}
UsrOI.Offset = PtrOI.Offset +
DL.getIndexedOffsetInType(
CurPtr->getType()->getPointerElementType(), Indices);
Follow = true;
return true;
}
if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
return HandlePassthroughUser(Usr, PtrOI, Follow);
// For PHIs we need to take care of the recurrence explicitly as the value
// might change while we iterate through a loop. For now, we give up if
// the PHI is not invariant.
if (isa<PHINode>(Usr)) {
// Check if the PHI is invariant (so far).
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
if (UsrOI == PtrOI)
return true;
// Check if the PHI operand already has an unknown offset, as we can't
// improve on that anymore.
if (PtrOI.Offset == OffsetAndSize::Unknown) {
UsrOI = PtrOI;
Follow = true;
return true;
}
// Check if the PHI operand is not dependent on the PHI itself.
// TODO: This is not great as we look at the pointer type. However, it
// is unclear where the Offset size comes from with typeless pointers.
APInt Offset(
DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
0);
if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true)) {
if (Offset != PtrOI.Offset) {
LLVM_DEBUG(dbgs()
<< "[AAPointerInfo] PHI operand pointer offset mismatch "
<< *CurPtr << " in " << *Usr << "\n");
return false;
}
return HandlePassthroughUser(Usr, PtrOI, Follow);
}
// TODO: Approximate in case we know the direction of the recurrence.
LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
<< *CurPtr << " in " << *Usr << "\n");
UsrOI = PtrOI;
UsrOI.Offset = OffsetAndSize::Unknown;
Follow = true;
return true;
}
if (auto *LoadI = dyn_cast<LoadInst>(Usr))
return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
AccessKind::AK_READ, PtrOI.Offset, Changed,
LoadI->getType());
if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
if (StoreI->getValueOperand() == CurPtr) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
<< *StoreI << "\n");
return false;
}
bool UsedAssumedInformation = false;
Optional<Value *> Content = A.getAssumedSimplified(
*StoreI->getValueOperand(), *this, UsedAssumedInformation);
return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
PtrOI.Offset, Changed,
StoreI->getValueOperand()->getType());
}
if (auto *CB = dyn_cast<CallBase>(Usr)) {
if (CB->isLifetimeStartOrEnd())
return true;
if (CB->isArgOperand(&U)) {
unsigned ArgNo = CB->getArgOperandNo(&U);
const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
*this, IRPosition::callsite_argument(*CB, ArgNo),
DepClassTy::REQUIRED);
Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
Changed;
return true;
}
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
<< "\n");
// TODO: Allow some call uses
return false;
}
LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
return false;
};
if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
/* CheckBBLivenessOnly */ true))
return indicatePessimisticFixpoint();
LLVM_DEBUG({
dbgs() << "Accesses by bin after update:\n";
for (auto &It : AccessBins) {
dbgs() << "[" << It.first.getOffset() << "-"
<< It.first.getOffset() + It.first.getSize()
<< "] : " << It.getSecond().size() << "\n";
for (auto &Acc : It.getSecond()) {
dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
<< "\n";
if (Acc.getLocalInst() != Acc.getRemoteInst())
dbgs() << " --> "
<< *Acc.getRemoteInst() << "\n";
if (!Acc.isWrittenValueYetUndetermined())
dbgs() << " - " << Acc.getWrittenValue() << "\n";
}
}
});
return Changed;
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
}
};
struct AAPointerInfoReturned final : AAPointerInfoImpl {
AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
: AAPointerInfoImpl(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
return indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
}
};
struct AAPointerInfoArgument final : AAPointerInfoFloating {
AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
: AAPointerInfoFloating(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AAPointerInfoFloating::initialize(A);
if (getAnchorScope()->isDeclaration())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
}
};
struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
: AAPointerInfoFloating(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
using namespace AA::PointerInfo;
// We handle memory intrinsics explicitly, at least the first (=
// destination) and second (= source) arguments, as we know how they are
// accessed.
if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
int64_t LengthVal = OffsetAndSize::Unknown;
if (Length)
LengthVal = Length->getSExtValue();
Value &Ptr = getAssociatedValue();
unsigned ArgNo = getIRPosition().getCallSiteArgNo();
ChangeStatus Changed = ChangeStatus::UNCHANGED;
if (ArgNo == 0) {
handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
nullptr, LengthVal);
} else if (ArgNo == 1) {
handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
nullptr, LengthVal);
} else {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
<< *MI << "\n");
return indicatePessimisticFixpoint();
}
return Changed;
}
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call site arguments instead of
// redirecting requests to the callee argument.
Argument *Arg = getAssociatedArgument();
if (!Arg)
return indicatePessimisticFixpoint();
const IRPosition &ArgPos = IRPosition::argument(*Arg);
auto &ArgAA =
A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
}
};
struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
: AAPointerInfoFloating(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
}
};
/// -----------------------NoUnwind Function Attribute--------------------------
struct AANoUnwindImpl : AANoUnwind {
AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
const std::string getAsStr() const override {
return getAssumed() ? "nounwind" : "may-unwind";
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto Opcodes = {
(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
(unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
(unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
auto CheckForNoUnwind = [&](Instruction &I) {
if (!I.mayThrow())
return true;
if (const auto *CB = dyn_cast<CallBase>(&I)) {
const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
*this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
return NoUnwindAA.isAssumedNoUnwind();
}
return false;
};
bool UsedAssumedInformation = false;
if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
UsedAssumedInformation))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
};
struct AANoUnwindFunction final : public AANoUnwindImpl {
AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
: AANoUnwindImpl(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
: AANoUnwindImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoUnwindImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F || F->isDeclaration())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call site arguments instead of
// redirecting requests to the callee argument.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(getState(), FnAA.getState());
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
/// --------------------- Function Return Values -------------------------------
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
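///
/// For example (illustrative IR), if every return instruction of @f returns
/// its first argument %x, manifest turns `define i32 @f(i32 %x)` into
/// `define i32 @f(i32 returned %x)`.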
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
/// Mapping of values potentially returned by the associated function to the
/// return instructions that might return them.
MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
/// State flags
///
///{
bool IsFixed = false;
bool IsValidState = true;
///}
public:
AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
: AAReturnedValues(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// Reset the state.
IsFixed = false;
IsValidState = true;
ReturnedValues.clear();
Function *F = getAssociatedFunction();
if (!F || F->isDeclaration()) {
indicatePessimisticFixpoint();
return;
}
assert(!F->getReturnType()->isVoidTy() &&
"Did not expect a void return type!");
// The map from instruction opcodes to those instructions in the function.
auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
// Look through all arguments; if one is marked as returned, we are done.
for (Argument &Arg : F->args()) {
if (Arg.hasReturnedAttr()) {
auto &ReturnInstSet = ReturnedValues[&Arg];
if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
for (Instruction *RI : *Insts)
ReturnInstSet.insert(cast<ReturnInst>(RI));
indicateOptimisticFixpoint();
return;
}
}
if (!A.isFunctionIPOAmendable(*F))
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override;
/// See AbstractAttribute::getState(...).
AbstractState &getState() override { return *this; }
/// See AbstractAttribute::getState(...).
const AbstractState &getState() const override { return *this; }
/// See AbstractAttribute::updateImpl(Attributor &A).
ChangeStatus updateImpl(Attributor &A) override;
llvm::iterator_range<iterator> returned_values() override {
return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
}
llvm::iterator_range<const_iterator> returned_values() const override {
return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
}
/// Return the number of potential return values, -1 if unknown.
size_t getNumReturnValues() const override {
return isValidState() ? ReturnedValues.size() : -1;
}
/// Return an assumed unique return value if a single candidate is found. If
/// there cannot be one, return nullptr. If it is not clear yet, return
/// None.
Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
/// See AbstractState::checkForAllReturnedValues(...).
bool checkForAllReturnedValuesAndReturnInsts(
function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
const override;
/// Pretty print the attribute similar to the IR representation.
const std::string getAsStr() const override;
/// See AbstractState::isAtFixpoint().
bool isAtFixpoint() const override { return IsFixed; }
/// See AbstractState::isValidState().
bool isValidState() const override { return IsValidState; }
/// See AbstractState::indicateOptimisticFixpoint(...).
ChangeStatus indicateOptimisticFixpoint() override {
IsFixed = true;
return ChangeStatus::UNCHANGED;
}
ChangeStatus indicatePessimisticFixpoint() override {
IsFixed = true;
IsValidState = false;
return ChangeStatus::CHANGED;
}
};
ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
// Bookkeeping.
assert(isValidState());
STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
"Number of functions with known return values");
// Check if we have an assumed unique return value that we could manifest.
Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
if (!UniqueRV.hasValue() || !UniqueRV.getValue())
return Changed;
// Bookkeeping.
STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
"Number of functions with a unique return value");
// If the assumed unique return value is an argument, annotate it.
if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
getAssociatedFunction()->getReturnType())) {
getIRPosition() = IRPosition::argument(*UniqueRVArg);
Changed = IRAttribute::manifest(A);
}
}
return Changed;
}
const std::string AAReturnedValuesImpl::getAsStr() const {
return (isAtFixpoint() ? "returns(#" : "may-return(#") +
(isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}
Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
// If checkForAllReturnedValues provides a unique value, ignoring potential
// undef values that can also be present, it is assumed to be the actual
// return value and forwarded to the caller of this method. If there are
// multiple, a nullptr is returned indicating there cannot be a unique
// returned value.
Optional<Value *> UniqueRV;
Type *Ty = getAssociatedFunction()->getReturnType();
auto Pred = [&](Value &RV) -> bool {
UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
return UniqueRV != Optional<Value *>(nullptr);
};
if (!A.checkForAllReturnedValues(Pred, *this))
UniqueRV = nullptr;
return UniqueRV;
}
bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
const {
if (!isValidState())
return false;
// Check all returned values but ignore call sites as long as we have not
// encountered an overdefined one during an update.
for (auto &It : ReturnedValues) {
Value *RV = It.first;
if (!Pred(*RV, It.second))
return false;
}
return true;
}
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
bool) -> bool {
bool UsedAssumedInformation = false;
Optional<Value *> SimpleRetVal =
A.getAssumedSimplified(V, *this, UsedAssumedInformation);
if (!SimpleRetVal.hasValue())
return true;
if (!SimpleRetVal.getValue())
return false;
Value *RetVal = *SimpleRetVal;
assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
"Assumed returned value should be valid in function scope!");
if (ReturnedValues[RetVal].insert(&Ret))
Changed = ChangeStatus::CHANGED;
return true;
};
auto ReturnInstCB = [&](Instruction &I) {
ReturnInst &Ret = cast<ReturnInst>(I);
return genericValueTraversal<ReturnInst>(
A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
&I);
};
// Discover returned values from all live return instructions in the
// associated function.
bool UsedAssumedInformation = false;
if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
UsedAssumedInformation))
return indicatePessimisticFixpoint();
return Changed;
}
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
: AAReturnedValuesImpl(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
: AAReturnedValuesImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
llvm_unreachable("Abstract attributes for returned values are not "
"supported for call sites yet!");
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
return indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {}
};
/// ------------------------ NoSync Function Attribute -------------------------
struct AANoSyncImpl : AANoSync {
AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
const std::string getAsStr() const override {
return getAssumed() ? "nosync" : "may-sync";
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override;
/// Helper function used to determine whether an instruction is non-relaxed
/// atomic, that is, an atomic instruction whose ordering is stronger than
/// unordered or monotonic.
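///
/// For example (illustrative IR, not from this file),
///   %v = load atomic i32, i32* %p acquire, align 4
/// is a non-relaxed atomic, while the same load with `monotonic` or
/// `unordered` ordering is not.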
static bool isNonRelaxedAtomic(Instruction *I);
/// Helper function specific to intrinsics which are potentially volatile.
static bool isNoSyncIntrinsic(Instruction *I);
};
bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
if (!I->isAtomic())
return false;
if (auto *FI = dyn_cast<FenceInst>(I))
// All legal orderings for fence are stronger than monotonic.
return FI->getSyncScopeID() != SyncScope::SingleThread;
else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
// Unordered is not a legal ordering for cmpxchg.
return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
AI->getFailureOrdering() != AtomicOrdering::Monotonic);
}
AtomicOrdering Ordering;
switch (I->getOpcode()) {
case Instruction::AtomicRMW:
Ordering = cast<AtomicRMWInst>(I)->getOrdering();
break;
case Instruction::Store:
Ordering = cast<StoreInst>(I)->getOrdering();
break;
case Instruction::Load:
Ordering = cast<LoadInst>(I)->getOrdering();
break;
default:
llvm_unreachable(
"New atomic operations need to be known in the attributor.");
}
return (Ordering != AtomicOrdering::Unordered &&
Ordering != AtomicOrdering::Monotonic);
}
/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
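///
/// For example (illustrative IR, not from this file),
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)
/// is volatile and therefore not nosync, while the same call with `i1 false`
/// for the volatile flag is.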
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
if (auto *MI = dyn_cast<MemIntrinsic>(I))
return !MI->isVolatile();
return false;
}
ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
auto CheckRWInstForNoSync = [&](Instruction &I) {
// We are looking for volatile instructions or non-relaxed atomics.
if (const auto *CB = dyn_cast<CallBase>(&I)) {
if (CB->hasFnAttr(Attribute::NoSync))
return true;
if (isNoSyncIntrinsic(&I))
return true;
const auto &NoSyncAA = A.getAAFor<AANoSync>(
*this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
return NoSyncAA.isAssumedNoSync();
}
if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
return true;
return false;
};
auto CheckForNoSync = [&](Instruction &I) {
// At this point we handled all read/write effects and they are all
// nosync, so they can be skipped.
if (I.mayReadOrWriteMemory())
return true;
// Non-convergent and readnone imply nosync.
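// For example, a convergent call that does not access memory, such as a
// GPU cross-lane shuffle intrinsic, communicates between threads and is
// conservatively not treated as nosync here.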
return !cast<CallBase>(I).isConvergent();
};
bool UsedAssumedInformation = false;
if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
UsedAssumedInformation) ||
!A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
UsedAssumedInformation))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
struct AANoSyncFunction final : public AANoSyncImpl {
AANoSyncFunction(const IRPosition &IRP, Attributor &A)
: AANoSyncImpl(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
: AANoSyncImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoSyncImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F || F->isDeclaration())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(getState(), FnAA.getState());
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
/// ------------------------ No-Free Attributes ----------------------------
struct AANoFreeImpl : public AANoFree {
AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForNoFree = [&](Instruction &I) {
const auto &CB = cast<CallBase>(I);
if (CB.hasFnAttr(Attribute::NoFree))
return true;
const auto &NoFreeAA = A.getAAFor<AANoFree>(
*this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
return NoFreeAA.isAssumedNoFree();
};
bool UsedAssumedInformation = false;
if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
UsedAssumedInformation))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return getAssumed() ? "nofree" : "may-free";
}
};
struct AANoFreeFunction final : public AANoFreeImpl {
AANoFreeFunction(const IRPosition &IRP, Attributor &A)
: AANoFreeImpl(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
: AANoFreeImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoFreeImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F || F->isDeclaration())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(getState(), FnAA.getState());
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};
/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
AANoFreeFloating(const IRPosition &IRP, Attributor &A)
: AANoFreeImpl(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
const IRPosition &IRP = getIRPosition();
const auto &NoFreeAA = A.getAAFor<AANoFree>(
*this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
if (NoFreeAA.isAssumedNoFree())
return ChangeStatus::UNCHANGED;
Value &AssociatedValue = getIRPosition().getAssociatedValue();
auto Pred = [&](const Use &U, bool &Follow) -> bool {
Instruction *UserI = cast<Instruction>(U.getUser());
if (auto *CB = dyn_cast<CallBase>(UserI)) {
if (CB->isBundleOperand(&U))
return false;
if (!CB->isArgOperand(&U))
return true;
unsigned ArgNo = CB->getArgOperandNo(&U);
const auto &NoFreeArg = A.getAAFor<AANoFree>(
*this, IRPosition::callsite_argument(*CB, ArgNo),
DepClassTy::REQUIRED);
return NoFreeArg.isAssumedNoFree();
}
if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
Follow = true;
return true;
}
if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
isa<ReturnInst>(UserI))
return true;
// Unknown user.
return false;
};
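// For example (illustrative IR, not from this file), for
//   %q = getelementptr i8, i8* %p, i64 4
//   call void @free(i8* %q)
// the GEP is followed transparently and the walk then fails for %p because
// the callee argument of @free is not known nofree.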
if (!A.checkForAllUses(Pred, *this, AssociatedValue))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
};
/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
AANoFreeArgument(const IRPosition &IRP, Attributor &A)
: AANoFreeFloating(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};
/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
: AANoFreeFloating(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites arguments instead of
// redirecting requests to the callee argument.
Argument *Arg = getAssociatedArgument();
if (!Arg)
return indicatePessimisticFixpoint();
const IRPosition &ArgPos = IRPosition::argument(*Arg);
auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(getState(), ArgAA.getState());
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};
/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
AANoFreeReturned(const IRPosition &IRP, Attributor &A)
: AANoFreeFloating(IRP, A) {
llvm_unreachable("NoFree is not applicable to function returns!");
}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
llvm_unreachable("NoFree is not applicable to function returns!");
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
llvm_unreachable("NoFree is not applicable to function returns!");
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {}
};
/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
: AANoFreeFloating(IRP, A) {}
ChangeStatus manifest(Attributor &A) override {
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
TrackUse = false;
const Value *UseV = U->get();
if (!UseV->getType()->isPointerTy())
return 0;
// We need to follow common pointer manipulation uses to the accesses they
// feed into. For now, we try to be smart and avoid looking through things
// we do not want to handle, e.g., non-inbounds GEPs.
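// For example (illustrative IR, not from this file), in
//   %v = load i64, i64* %p
// the use of %p implies at least 8 dereferenceable bytes, and nonnull if
// null is not a defined pointer in the address space.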
if (isa<CastInst>(I)) {
TrackUse = true;
return 0;
}
if (isa<GetElementPtrInst>(I)) {
TrackUse = true;
return 0;
}
Type *PtrTy = UseV->getType();
const Function *F = I->getFunction();
bool NullPointerIsDefined =
F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
const DataLayout &DL = A.getInfoCache().getDL();
if (const auto *CB = dyn_cast<CallBase>(I)) {
if (CB->isBundleOperand(U)) {
if (RetainedKnowledge RK = getKnowledgeFromUse(
U, {Attribute::NonNull, Attribute::Dereferenceable})) {
IsNonNull |=
(RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
return RK.ArgValue;
}
return 0;
}
if (CB->isCallee(U)) {
IsNonNull |= !NullPointerIsDefined;
return 0;
}
unsigned ArgNo = CB->getArgOperandNo(U);
IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
// As long as we only use known information there is no need to track
// dependences here.
auto &DerefAA =
A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
IsNonNull |= DerefAA.isKnownNonNull();
return DerefAA.getKnownDereferenceableBytes();
}
int64_t Offset;
const Value *Base =
getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
if (Base) {
if (Base == &AssociatedValue &&
getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
int64_t DerefBytes =
(int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
IsNonNull |= !NullPointerIsDefined;
return std::max(int64_t(0), DerefBytes);
}
}
// Corner case when the offset is 0.
Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
/*AllowNonInbounds*/ true);
if (Base) {
if (Offset == 0 && Base == &AssociatedValue &&
getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
int64_t DerefBytes =
(int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
IsNonNull |= !NullPointerIsDefined;
return std::max(int64_t(0), DerefBytes);
}
}
return 0;
}
struct AANonNullImpl : AANonNull {
AANonNullImpl(const IRPosition &IRP, Attributor &A)
: AANonNull(IRP, A),
NullIsDefined(NullPointerIsDefined(
getAnchorScope(),
getAssociatedValue().getType()->getPointerAddressSpace())) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
Value &V = getAssociatedValue();
if (!NullIsDefined &&
hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
/* IgnoreSubsumingPositions */ false, &A)) {
indicateOptimisticFixpoint();
return;
}
if (isa<ConstantPointerNull>(V)) {
indicatePessimisticFixpoint();
return;
}
AANonNull::initialize(A);
bool CanBeNull, CanBeFreed;
if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
CanBeFreed)) {
if (!CanBeNull) {
indicateOptimisticFixpoint();
return;
}
}
if (isa<GlobalValue>(&getAssociatedValue())) {
indicatePessimisticFixpoint();
return;
}
if (Instruction *CtxI = getCtxI())
followUsesInMBEC(*this, A, getState(), *CtxI);
}
/// See followUsesInMBEC
bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
AANonNull::StateType &State) {
bool IsNonNull = false;
bool TrackUse = false;
getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
IsNonNull, TrackUse);
State.setKnown(IsNonNull);
return TrackUse;
}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return getAssumed() ? "nonnull" : "may-null";
}
/// Flag to determine if the underlying value can be null and still allow
/// valid accesses.
const bool NullIsDefined;
};
/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
AANonNullFloating(const IRPosition &IRP, Attributor &A)
: AANonNullImpl(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
const DataLayout &DL = A.getDataLayout();
DominatorTree *DT = nullptr;
AssumptionCache *AC = nullptr;
InformationCache &InfoCache = A.getInfoCache();
if (const Function *Fn = getAnchorScope()) {
DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
}
auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
AANonNull::StateType &T, bool Stripped) -> bool {
const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
DepClassTy::REQUIRED);
if (!Stripped && this == &AA) {
if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
T.indicatePessimisticFixpoint();
} else {
// Use abstract attribute information.
const AANonNull::StateType &NS = AA.getState();
T ^= NS;
}
return T.isValidState();
};
StateType T;
if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
VisitValueCB, getCtxI()))
return indicatePessimisticFixpoint();
return clampStateAndIndicateChange(getState(), T);
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
/// NonNull attribute for function return value.
struct AANonNullReturned final
: AAReturnedFromReturnedValues<AANonNull, AANonNull> {
AANonNullReturned(const IRPosition &IRP, Attributor &A)
: AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return getAssumed() ? "nonnull" : "may-null";
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
/// NonNull attribute for function argument.
struct AANonNullArgument final
: AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
AANonNullArgument(const IRPosition &IRP, Attributor &A)
: AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
struct AANonNullCallSiteArgument final : AANonNullFloating {
AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
: AANonNullFloating(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
: AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
: AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
/// ------------------------ No-Recurse Attributes ----------------------------
struct AANoRecurseImpl : public AANoRecurse {
AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
/// See AbstractAttribute::getAsStr()
const std::string getAsStr() const override {
return getAssumed() ? "norecurse" : "may-recurse";
}
};
struct AANoRecurseFunction final : AANoRecurseImpl {
AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
: AANoRecurseImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoRecurseImpl::initialize(A);
if (const Function *F = getAnchorScope())
if (A.getInfoCache().getSccSize(*F) != 1)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// If all live call sites are known to be no-recurse, we are as well.
auto CallSitePred = [&](AbstractCallSite ACS) {
const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
*this, IRPosition::function(*ACS.getInstruction()->getFunction()),
DepClassTy::NONE);
return NoRecurseAA.isKnownNoRecurse();
};
bool AllCallSitesKnown;
if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
// If we know all call sites and all are known no-recurse, we are done.
// If all known call sites, which might not be all that exist, are known
// to be no-recurse, we are not done but we can continue to assume
// no-recurse. If one of the call sites we have not visited will become
// live, another update is triggered.
if (AllCallSitesKnown)
indicateOptimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
// If the above check does not hold anymore we look at the calls.
auto CheckForNoRecurse = [&](Instruction &I) {
const auto &CB = cast<CallBase>(I);
if (CB.hasFnAttr(Attribute::NoRecurse))
return true;
const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
*this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
if (!NoRecurseAA.isAssumedNoRecurse())
return false;
// Recursion to the same function
if (CB.getCalledFunction() == getAnchorScope())
return false;
return true;
};
bool UsedAssumedInformation = false;
if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
UsedAssumedInformation))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
: AANoRecurseImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoRecurseImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F || F->isDeclaration())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
return clampStateAndIndicateChange(getState(), FnAA.getState());
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
/// -------------------- Undefined-Behavior Attributes ------------------------
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
: AAUndefinedBehavior(IRP, A) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
const size_t UBPrevSize = KnownUBInsts.size();
const size_t NoUBPrevSize = AssumedNoUBInsts.size();
auto InspectMemAccessInstForUB = [&](Instruction &I) {
// The LangRef now states that a volatile store is not UB, so skip them.
if (I.isVolatile() && I.mayWriteToMemory())
return true;
// Skip instructions that are already saved.
if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
return true;
// If we reach here, we know we have an instruction
// that accesses memory through a pointer operand,
// which getPointerOperand() returns.
Value *PtrOp =
const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
assert(PtrOp &&
"Expected pointer operand of memory accessing instruction");
// Either we stopped and the appropriate action was taken,
// or we got back a simplified value to continue.
Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
return true;
const Value *PtrOpVal = SimplifiedPtrOp.getValue();
// A memory access through a pointer is considered UB
// only if the pointer has constant null value.
// TODO: Expand it to not only check constant values.
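// For example (illustrative IR, not from this file),
//   store i32 0, i32* null
// is known UB in an address space where the null pointer is not defined.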
if (!isa<ConstantPointerNull>(PtrOpVal)) {
AssumedNoUBInsts.insert(&I);
return true;
}
const Type *PtrTy = PtrOpVal->getType();
// Because we only consider instructions inside functions,
// assume that a parent function exists.
const Function *F = I.getFunction();
// A memory access using constant null pointer is only considered UB
// if null pointer is _not_ defined for the target platform.
if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
AssumedNoUBInsts.insert(&I);
else
KnownUBInsts.insert(&I);
return true;
};
auto InspectBrInstForUB = [&](Instruction &I) {
// A conditional branch instruction is considered UB if it has `undef`
// condition.
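// For example (illustrative IR, not from this file),
//   br i1 undef, label %a, label %b
// is treated as UB, while an unconditional `br label %a` never is.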
// Skip instructions that are already saved.
if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
return true;
// We know we have a branch instruction.
auto *BrInst = cast<BranchInst>(&I);
// Unconditional branches are never considered UB.
if (BrInst->isUnconditional())
return true;
// Either we stopped and the appropriate action was taken,
// or we got back a simplified value to continue.
Optional<Value *> SimplifiedCond =
stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
return true;
AssumedNoUBInsts.insert(&I);
return true;
};
auto InspectCallSiteForUB = [&](Instruction &I) {
// Check whether a call site always causes UB.
// Skip instructions that are already saved.
if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
return true;
// Check nonnull and noundef argument attribute violation for each
// callsite.
CallBase &CB = cast<CallBase>(I);
Function *Callee = CB.getCalledFunction();
if (!Callee)
return true;
for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
// If the current argument is known to be simplified to a null pointer and
// the corresponding argument position is known to have the nonnull
// attribute, the argument is poison. Furthermore, if the argument is
// poison and the position is known to have the noundef attribute, this
// call site is considered UB.
if (idx >= Callee->arg_size())
break;
Value *ArgVal = CB.getArgOperand(idx);
if (!ArgVal)
continue;
// Here, we handle three cases.
// (1) Not having a value means it is dead. (we can replace the value
// with undef)
// (2) Simplified to undef. The argument violates the noundef attribute.
// (3) Simplified to a null pointer where it is known to be nonnull.
// The argument is a poison value and violates the noundef attribute.
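// For example (illustrative IR, not from this file), the call
//   call void @g(i8* null)
// is known UB if the matching parameter of @g is `nonnull noundef`.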
IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
auto &NoUndefAA =
A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
if (!NoUndefAA.isKnownNoUndef())
continue;
bool UsedAssumedInformation = false;
Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
if (UsedAssumedInformation)
continue;
if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
return true;
if (!SimplifiedVal.hasValue() ||
isa<UndefValue>(*SimplifiedVal.getValue())) {
KnownUBInsts.insert(&I);
continue;
}
if (!ArgVal->getType()->isPointerTy() ||
!isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
continue;
auto &NonNullAA =
A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
if (NonNullAA.isKnownNonNull())
KnownUBInsts.insert(&I);
}
return true;
};
auto InspectReturnInstForUB =
[&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
// Check whether a return instruction always causes UB.
// Note: It is guaranteed that the returned position of the anchor
// scope has noundef attribute when this is called.
// We also ensure the return position is not "assumed dead"
// because the returned value was then potentially simplified to
// `undef` in AAReturnedValues without removing the `noundef`
// attribute yet.
// When the returned position has the noundef attribute, UB occurs in the
// following cases.
// (1) Returned value is known to be undef.
// (2) The value is known to be a null pointer and the returned
// position has nonnull attribute (because the returned value is
// poison).
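// For example (illustrative IR, not from this file), `ret i8* null` is
// known UB if the returned position is `nonnull noundef`.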
bool FoundUB = false;
if (isa<UndefValue>(V)) {
FoundUB = true;
} else {
if (isa<ConstantPointerNull>(V)) {
auto &NonNullAA = A.getAAFor<AANonNull>(
*this, IRPosition::returned(*getAnchorScope()),
DepClassTy::NONE);
if (NonNullAA.isKnownNonNull())
FoundUB = true;
}
}
if (FoundUB)
for (ReturnInst *RI : RetInsts)
KnownUBInsts.insert(RI);
return true;
};
bool UsedAssumedInformation = false;
A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
{Instruction::Load, Instruction::Store,
Instruction::AtomicCmpXchg,
Instruction::AtomicRMW},
UsedAssumedInformation,
/* CheckBBLivenessOnly */ true);
A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
UsedAssumedInformation,
/* CheckBBLivenessOnly */ true);
A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
UsedAssumedInformation);
// If the returned position of the anchor scope has the noundef attribute,
// check all return instructions.
if (!getAnchorScope()->getReturnType()->isVoidTy()) {
const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
auto &RetPosNoUndefAA =
A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
if (RetPosNoUndefAA.isKnownNoUndef())
A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
*this);
}
}
if (NoUBPrevSize != AssumedNoUBInsts.size() ||
UBPrevSize != KnownUBInsts.size())
return ChangeStatus::CHANGED;
return ChangeStatus::UNCHANGED;
}
bool isKnownToCauseUB(Instruction *I) const override {
return KnownUBInsts.count(I);
}
bool isAssumedToCauseUB(Instruction *I) const override {
// In simple words, if an instruction is not in the set of instructions
// assumed to _not_ cause UB, then it is assumed to cause UB (that includes
// those in the KnownUBInsts set). The rest of the boilerplate ensures that
// it is one of the instructions we test for UB.
switch (I->getOpcode()) {
case Instruction::Load:
case Instruction::Store:
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
return !AssumedNoUBInsts.count(I);
case Instruction::Br: {
auto *BrInst = cast<BranchInst>(I);
if (BrInst->isUnconditional())
return false;
return !AssumedNoUBInsts.count(I);
}
default:
return false;
}
return false;
}
ChangeStatus manifest(Attributor &A) override {
if (KnownUBInsts.empty())
return ChangeStatus::UNCHANGED;
for (Instruction *I : KnownUBInsts)
A.changeToUnreachableAfterManifest(I);
return ChangeStatus::CHANGED;
}
/// See AbstractAttribute::getAsStr()
const std::string getAsStr() const override {
return getAssumed() ? "undefined-behavior" : "no-ub";
}
/// Note: The correctness of this analysis depends on the fact that the
/// following 2 sets will stop changing after some point.
/// "Change" here means that their size changes.
/// The size of each set is monotonically increasing
/// (we only add items to them) and it is upper bounded by the number of
/// instructions in the processed function (we can never save more
/// elements in