//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
using namespace llvm;
#define DEBUG_TYPE "attributor"
STATISTIC(NumFnWithExactDefinition,
"Number of function with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
"Number of function without exact definitions");
STATISTIC(NumAttributesTimedOut,
"Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
"Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
"Number of abstract attributes manifested in IR");
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
// void trackStatistics() const override {
// STATS_DECLTRACK_ARG_ATTR(returned)
// }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
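// e.g., with multiple increment sites (a hypothetical sketch, names are
// illustrative):
// void trackStatistics() const override {
// STATS_DECL(myattr, Function, "Number of functions marked 'myattr'")
// if (ManifestedOnFunction)
// STATS_TRACK(myattr, Function)
// }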
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG) \
STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG) \
{ \
STATS_DECL(NAME, TYPE, MSG) \
STATS_TRACK(NAME, TYPE) \
}
#define STATS_DECLTRACK_ARG_ATTR(NAME) \
STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
STATS_DECLTRACK(NAME, CSArguments, \
BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME) \
STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME) \
STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
STATS_DECLTRACK(NAME, FunctionReturn, \
BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
STATS_DECLTRACK(NAME, CSReturn, \
BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
STATS_DECLTRACK(NAME, Floating, \
("Number of floating values known to be '" #NAME "'"))
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 iterations seem not to induce compile-time
// overheads (when run with the first 5 abstract attributes). The results also
// indicate that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
cl::desc("Maximal number of fixpoint iterations."),
cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
"attributor-max-iterations-verify", cl::Hidden,
cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
cl::init(false));
static cl::opt<bool> DisableAttributor(
"attributor-disable", cl::Hidden,
cl::desc("Disable the attributor inter-procedural deduction pass."),
cl::init(true));
static cl::opt<bool> ManifestInternal(
"attributor-manifest-internal", cl::Hidden,
cl::desc("Manifest Attributor internal string attributes."),
cl::init(false));
static cl::opt<unsigned> DepRecInterval(
"attributor-dependence-recompute-interval", cl::Hidden,
cl::desc("Number of iterations until dependences are recomputed."),
cl::init(4));
static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
cl::init(true), cl::Hidden);
static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
cl::Hidden);
/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
return l == ChangeStatus::UNCHANGED ? l : r;
}
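// For example, when accumulating the result of several manifest steps:
//   (ChangeStatus::CHANGED | ChangeStatus::UNCHANGED) == ChangeStatus::CHANGED
//   (ChangeStatus::CHANGED & ChangeStatus::UNCHANGED) == ChangeStatus::UNCHANGED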
///}
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
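///
/// For example, given the (hypothetical) IR
///   %c = bitcast i32* %a to i8*
///   %v = select i1 %cmp, i8* %c, i8* %b
/// a traversal starting at %v invokes \p VisitValueCB on %a and %b, with the
/// "stripped" flag set since the cast and the select were looked through.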
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
int MaxValues = 8) {
const AAIsDead *LivenessAA = nullptr;
if (IRP.getAnchorScope())
LivenessAA = &A.getAAFor<AAIsDead>(
QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
/* TrackDependence */ false);
bool AnyDead = false;
// TODO: Use Positions here to allow context sensitivity in VisitValueCB
SmallPtrSet<Value *, 16> Visited;
SmallVector<Value *, 16> Worklist;
Worklist.push_back(&IRP.getAssociatedValue());
int Iteration = 0;
do {
Value *V = Worklist.pop_back_val();
// Check if we should process the current value. To prevent endless
// recursion keep a record of the values we followed!
if (!Visited.insert(V).second)
continue;
// Make sure we limit the compile time for complex expressions.
if (Iteration++ >= MaxValues)
return false;
// Explicitly look through calls with a "returned" attribute if we do
// not have a pointer, as stripPointerCasts only works on pointers.
Value *NewV = nullptr;
if (V->getType()->isPointerTy()) {
NewV = V->stripPointerCasts();
} else {
CallSite CS(V);
if (CS && CS.getCalledFunction()) {
for (Argument &Arg : CS.getCalledFunction()->args())
if (Arg.hasReturnedAttr()) {
NewV = CS.getArgOperand(Arg.getArgNo());
break;
}
}
}
if (NewV && NewV != V) {
Worklist.push_back(NewV);
continue;
}
// Look through select instructions, visit both potential values.
if (auto *SI = dyn_cast<SelectInst>(V)) {
Worklist.push_back(SI->getTrueValue());
Worklist.push_back(SI->getFalseValue());
continue;
}
// Look through phi nodes, visit all live operands.
if (auto *PHI = dyn_cast<PHINode>(V)) {
assert(LivenessAA &&
"Expected liveness in the presence of instructions!");
for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
AnyDead = true;
continue;
}
Worklist.push_back(PHI->getIncomingValue(u));
}
continue;
}
// Once a leaf is reached we inform the user through the callback.
if (!VisitValueCB(*V, State, Iteration > 1))
return false;
} while (!Worklist.empty());
// If we actually used liveness information, we have to record a dependence.
if (AnyDead)
A.recordDependence(*LivenessAA, QueryingAA);
// All values have been visited.
return true;
}
/// Return true if \p New is equal to or worse than \p Old.
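/// For integer attributes this compares the attribute values; e.g., a new
/// `dereferenceable(4)` is equal to or worse than an existing
/// `dereferenceable(8)`, while non-integer attributes are trivially so.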
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
if (!Old.isIntAttribute())
return true;
return Old.getValueAsInt() >= New.getValueAsInt();
}
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
AttributeList &Attrs, int AttrIdx) {
if (Attr.isEnumAttribute()) {
Attribute::AttrKind Kind = Attr.getKindAsEnum();
if (Attrs.hasAttribute(AttrIdx, Kind))
if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
return false;
Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
return true;
}
if (Attr.isStringAttribute()) {
StringRef Kind = Attr.getKindAsString();
if (Attrs.hasAttribute(AttrIdx, Kind))
if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
return false;
Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
return true;
}
if (Attr.isIntAttribute()) {
Attribute::AttrKind Kind = Attr.getKindAsEnum();
if (Attrs.hasAttribute(AttrIdx, Kind))
if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
return false;
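// Remove the existing, weaker integer attribute first so the attribute kind
// does not occur twice once the stronger one is added below.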
Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
return true;
}
llvm_unreachable("Expected enum or string attribute!");
}
static const Value *getPointerOperand(const Instruction *I) {
if (auto *LI = dyn_cast<LoadInst>(I))
if (!LI->isVolatile())
return LI->getPointerOperand();
if (auto *SI = dyn_cast<StoreInst>(I))
if (!SI->isVolatile())
return SI->getPointerOperand();
if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
if (!CXI->isVolatile())
return CXI->getPointerOperand();
if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
if (!RMWI->isVolatile())
return RMWI->getPointerOperand();
return nullptr;
}
static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
int64_t &BytesOffset,
const DataLayout &DL) {
const Value *Ptr = getPointerOperand(I);
if (!Ptr)
return nullptr;
return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
/*AllowNonInbounds*/ false);
}
ChangeStatus AbstractAttribute::update(Attributor &A) {
ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
if (getState().isAtFixpoint())
return HasChanged;
LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
HasChanged = updateImpl(A);
LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
<< "\n");
return HasChanged;
}
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
const ArrayRef<Attribute> &DeducedAttrs) {
Function *ScopeFn = IRP.getAssociatedFunction();
IRPosition::Kind PK = IRP.getPositionKind();
// The following is generic code that will manifest attributes in
// DeducedAttrs if they improve the current IR. Due to the different
// annotation positions we use the underlying AttributeList interface.
AttributeList Attrs;
switch (PK) {
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
return ChangeStatus::UNCHANGED;
case IRPosition::IRP_ARGUMENT:
case IRPosition::IRP_FUNCTION:
case IRPosition::IRP_RETURNED:
Attrs = ScopeFn->getAttributes();
break;
case IRPosition::IRP_CALL_SITE:
case IRPosition::IRP_CALL_SITE_RETURNED:
case IRPosition::IRP_CALL_SITE_ARGUMENT:
Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
break;
}
ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
LLVMContext &Ctx = IRP.getAnchorValue().getContext();
for (const Attribute &Attr : DeducedAttrs) {
if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
continue;
HasChanged = ChangeStatus::CHANGED;
}
if (HasChanged == ChangeStatus::UNCHANGED)
return HasChanged;
switch (PK) {
case IRPosition::IRP_ARGUMENT:
case IRPosition::IRP_FUNCTION:
case IRPosition::IRP_RETURNED:
ScopeFn->setAttributes(Attrs);
break;
case IRPosition::IRP_CALL_SITE:
case IRPosition::IRP_CALL_SITE_RETURNED:
case IRPosition::IRP_CALL_SITE_ARGUMENT:
CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
break;
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
break;
}
return HasChanged;
}
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
IRPositions.emplace_back(IRP);
ImmutableCallSite ICS(&IRP.getAnchorValue());
switch (IRP.getPositionKind()) {
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
case IRPosition::IRP_FUNCTION:
return;
case IRPosition::IRP_ARGUMENT:
case IRPosition::IRP_RETURNED:
IRPositions.emplace_back(
IRPosition::function(*IRP.getAssociatedFunction()));
return;
case IRPosition::IRP_CALL_SITE:
assert(ICS && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
if (!ICS.hasOperandBundles())
if (const Function *Callee = ICS.getCalledFunction())
IRPositions.emplace_back(IRPosition::function(*Callee));
return;
case IRPosition::IRP_CALL_SITE_RETURNED:
assert(ICS && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
if (!ICS.hasOperandBundles()) {
if (const Function *Callee = ICS.getCalledFunction()) {
IRPositions.emplace_back(IRPosition::returned(*Callee));
IRPositions.emplace_back(IRPosition::function(*Callee));
}
}
IRPositions.emplace_back(
IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
return;
case IRPosition::IRP_CALL_SITE_ARGUMENT: {
int ArgNo = IRP.getArgNo();
assert(ICS && ArgNo >= 0 && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
if (!ICS.hasOperandBundles()) {
const Function *Callee = ICS.getCalledFunction();
if (Callee && Callee->arg_size() > unsigned(ArgNo))
IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
if (Callee)
IRPositions.emplace_back(IRPosition::function(*Callee));
}
IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
return;
}
}
}
bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
bool IgnoreSubsumingPositions) const {
for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
for (Attribute::AttrKind AK : AKs)
if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
return true;
// The first position returned by the SubsumingPositionIterator is
// always the position itself. If we ignore subsuming positions we
// are done after the first iteration.
if (IgnoreSubsumingPositions)
break;
}
return false;
}
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
SmallVectorImpl<Attribute> &Attrs) const {
for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
for (Attribute::AttrKind AK : AKs) {
const Attribute &Attr = EquivIRP.getAttr(AK);
if (Attr.getKindAsEnum() == AK)
Attrs.push_back(Attr);
}
}
void IRPosition::verify() {
switch (KindOrArgNo) {
default:
assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
"Expected call base or argument for positive attribute index!");
if (isa<Argument>(AnchorVal)) {
assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
"Argument number mismatch!");
assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
"Associated value mismatch!");
} else {
assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
"Call site argument number mismatch!");
assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
&getAssociatedValue() &&
"Associated value mismatch!");
}
break;
case IRP_INVALID:
assert(!AnchorVal && "Expected no value for an invalid position!");
break;
case IRP_FLOAT:
assert((!isa<CallBase>(&getAssociatedValue()) &&
!isa<Argument>(&getAssociatedValue())) &&
"Expected specialized kind for call base and argument values!");
break;
case IRP_RETURNED:
assert(isa<Function>(AnchorVal) &&
"Expected function for a 'returned' position!");
assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
break;
case IRP_CALL_SITE_RETURNED:
assert((isa<CallBase>(AnchorVal)) &&
"Expected call base for 'call site returned' position!");
assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
break;
case IRP_CALL_SITE:
assert((isa<CallBase>(AnchorVal)) &&
"Expected call base for 'call site function' position!");
assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
break;
case IRP_FUNCTION:
assert(isa<Function>(AnchorVal) &&
"Expected function for a 'function' position!");
assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
break;
}
}
namespace {
/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);
template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
const IntegerState &R) {
auto Assumed = S.getAssumed();
S ^= R;
return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
: ChangeStatus::CHANGED;
}
template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
const BooleanState &R) {
return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
StateType &S) {
LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
<< static_cast<const AbstractAttribute &>(QueryingAA)
<< " into " << S << "\n");
assert((QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_RETURNED ||
QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_CALL_SITE_RETURNED) &&
"Can only clamp returned value states for a function returned or call "
"site returned position!");
// Use an optional state as there might not be any return values and we want
// to join (IntegerState::operator&) the states of all return values there are.
Optional<StateType> T;
// Callback for each possibly returned value.
auto CheckReturnValue = [&](Value &RV) -> bool {
const IRPosition &RVPos = IRPosition::value(RV);
const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
<< " @ " << RVPos << "\n");
const StateType &AAS = static_cast<const StateType &>(AA.getState());
if (T.hasValue())
*T &= AAS;
else
T = AAS;
LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
<< "\n");
return T->isValidState();
};
if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
S.indicatePessimisticFixpoint();
else if (T.hasValue())
S ^= *T;
}
/// Helper class to compose two generic deductions.
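/// (See e.g. AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext below for
/// a concrete composition of AAFromMustBeExecutedContext with
/// AAArgumentFromCallSiteArguments.)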
template <typename AAType, typename Base, typename StateType,
template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
: public F<AAType, G<AAType, Base, StateType>, StateType> {
AAComposeTwoGenericDeduction(const IRPosition &IRP)
: F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
ChangeStatus ChangedF = F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
return ChangedF | ChangedG;
}
};
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
StateType S;
clampReturnedValueStates<AAType, StateType>(A, *this, S);
// TODO: If we know we visited all returned values, thus none are assumed
// dead, we can take the known information from the state T.
return clampStateAndIndicateChange<StateType>(this->getState(), S);
}
};
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
StateType &S) {
LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
<< static_cast<const AbstractAttribute &>(QueryingAA)
<< " into " << S << "\n");
assert(QueryingAA.getIRPosition().getPositionKind() ==
IRPosition::IRP_ARGUMENT &&
"Can only clamp call site argument states for an argument position!");
// Use an optional state as there might not be any call sites and we want
// to join (IntegerState::operator&) the states of all there are.
Optional<StateType> T;
// The argument number which is also the call site argument number.
unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
auto CallSiteCheck = [&](AbstractCallSite ACS) {
const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
// Check if a corresponding argument was found or if it is not associated
// (which can happen for callback calls).
if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
return false;
const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
<< " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
const StateType &AAS = static_cast<const StateType &>(AA.getState());
if (T.hasValue())
*T &= AAS;
else
T = AAS;
LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
<< "\n");
return T->isValidState();
};
if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
S.indicatePessimisticFixpoint();
else if (T.hasValue())
S ^= *T;
}
/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
StateType S;
clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
// TODO: If we know we visited all incoming values, thus none are assumed
// dead, we can take the known information from the state T.
return clampStateAndIndicateChange<StateType>(this->getState(), S);
}
};
/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
struct AACallSiteReturnedFromReturned : public Base {
AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
assert(this->getIRPosition().getPositionKind() ==
IRPosition::IRP_CALL_SITE_RETURNED &&
"Can only wrap function returned positions for call site returned "
"positions!");
auto &S = this->getState();
const Function *AssociatedFunction =
this->getIRPosition().getAssociatedFunction();
if (!AssociatedFunction)
return S.indicatePessimisticFixpoint();
IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
return clampStateAndIndicateChange(
S, static_cast<const typename AAType::StateType &>(AA.getState()));
}
};
/// Helper class for generic deduction using must-be-executed-context.
/// The base class is required to have a `followUse` method:
/// bool followUse(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of \p U.
/// `followUse` returns true if the value should be tracked transitively.
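/// (For a conforming base class see e.g. AANonNullImpl::followUse below.)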
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
void initialize(Attributor &A) override {
Base::initialize(A);
IRPosition &IRP = this->getIRPosition();
Instruction *CtxI = IRP.getCtxI();
if (!CtxI)
return;
for (const Use &U : IRP.getAssociatedValue().uses())
Uses.insert(&U);
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto BeforeState = this->getState();
auto &S = this->getState();
Instruction *CtxI = this->getIRPosition().getCtxI();
if (!CtxI)
return ChangeStatus::UNCHANGED;
MustBeExecutedContextExplorer &Explorer =
A.getInfoCache().getMustBeExecutedContextExplorer();
SetVector<const Use *> NextUses;
for (const Use *U : Uses) {
if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
bool Found = EIt.count(UserI);
while (!Found && ++EIt != EEnd)
Found = EIt.getCurrentInst() == UserI;
if (Found && Base::followUse(A, U, UserI))
for (const Use &Us : UserI->uses())
NextUses.insert(&Us);
}
}
for (const Use *U : NextUses)
Uses.insert(U);
return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
}
private:
/// Container for (transitive) uses of the associated value.
SetVector<const Use *> Uses;
};
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
AAComposeTwoGenericDeduction<AAType, Base, StateType,
AAFromMustBeExecutedContext,
AAArgumentFromCallSiteArguments>;
template <typename AAType, typename Base,
typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
AAComposeTwoGenericDeduction<AAType, Base, StateType,
AAFromMustBeExecutedContext,
AACallSiteReturnedFromReturned>;
/// -----------------------NoUnwind Function Attribute--------------------------
struct AANoUnwindImpl : AANoUnwind {
AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
const std::string getAsStr() const override {
return getAssumed() ? "nounwind" : "may-unwind";
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto Opcodes = {
(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
(unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
(unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
auto CheckForNoUnwind = [&](Instruction &I) {
if (!I.mayThrow())
return true;
if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
const auto &NoUnwindAA =
A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
return NoUnwindAA.isAssumedNoUnwind();
}
return false;
};
if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
};
struct AANoUnwindFunction final : public AANoUnwindImpl {
AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
/// NoUnwind attribute deduction for call sites.
struct AANoUnwindCallSite final : AANoUnwindImpl {
AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoUnwindImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call site arguments instead of
// redirecting requests to the callee argument.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
return clampStateAndIndicateChange(
getState(),
static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
/// --------------------- Function Return Values -------------------------------
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
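///
/// For example, given the (hypothetical) IR
///   define i32* @id(i32* %p) { ret i32* %p }
/// the assumed unique returned value is the argument %p, so manifest will mark
/// %p with the "returned" attribute.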
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
/// Mapping of values potentially returned by the associated function to the
/// return instructions that might return them.
MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
/// Mapping to remember the number of returned values for a call site such
/// that we can avoid updates if nothing changed.
DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
/// Set of unresolved calls returned by the associated function.
SmallSetVector<CallBase *, 4> UnresolvedCalls;
/// State flags
///
///{
bool IsFixed = false;
bool IsValidState = true;
///}
public:
AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// Reset the state.
IsFixed = false;
IsValidState = true;
ReturnedValues.clear();
Function *F = getAssociatedFunction();
if (!F) {
indicatePessimisticFixpoint();
return;
}
// The map from instruction opcodes to those instructions in the function.
auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
// Look through all arguments, if one is marked as returned we are done.
for (Argument &Arg : F->args()) {
if (Arg.hasReturnedAttr()) {
auto &ReturnInstSet = ReturnedValues[&Arg];
for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
ReturnInstSet.insert(cast<ReturnInst>(RI));
indicateOptimisticFixpoint();
return;
}
}
if (!F->hasExactDefinition())
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override;
/// See AbstractAttribute::getState(...).
AbstractState &getState() override { return *this; }
/// See AbstractAttribute::getState(...).
const AbstractState &getState() const override { return *this; }
/// See AbstractAttribute::updateImpl(Attributor &A).
ChangeStatus updateImpl(Attributor &A) override;
llvm::iterator_range<iterator> returned_values() override {
return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
}
llvm::iterator_range<const_iterator> returned_values() const override {
return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
}
const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
return UnresolvedCalls;
}
/// Return the number of potential return values, -1 if unknown.
size_t getNumReturnValues() const override {
return isValidState() ? ReturnedValues.size() : -1;
}
/// Return an assumed unique return value if a single candidate is found. If
/// there cannot be one, return a nullptr. If it is not clear yet, return
/// llvm::None.
Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
/// See AbstractState::checkForAllReturnedValues(...).
bool checkForAllReturnedValuesAndReturnInsts(
const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
&Pred) const override;
/// Pretty print the attribute similar to the IR representation.
const std::string getAsStr() const override;
/// See AbstractState::isAtFixpoint().
bool isAtFixpoint() const override { return IsFixed; }
/// See AbstractState::isValidState().
bool isValidState() const override { return IsValidState; }
/// See AbstractState::indicateOptimisticFixpoint(...).
ChangeStatus indicateOptimisticFixpoint() override {
IsFixed = true;
return ChangeStatus::UNCHANGED;
}
ChangeStatus indicatePessimisticFixpoint() override {
IsFixed = true;
IsValidState = false;
return ChangeStatus::CHANGED;
}
};
ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
// Bookkeeping.
assert(isValidState());
STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
"Number of function with known return values");
// Check if we have an assumed unique return value that we could manifest.
Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
if (!UniqueRV.hasValue() || !UniqueRV.getValue())
return Changed;
// Bookkeeping.
STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
"Number of function with unique return");
// Callback to replace the uses of CB with the constant C.
auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
if (CB.getNumUses() == 0 || CB.isMustTailCall())
return ChangeStatus::UNCHANGED;
CB.replaceAllUsesWith(&C);
return ChangeStatus::CHANGED;
};
// If the assumed unique return value is an argument, annotate it.
if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
getIRPosition() = IRPosition::argument(*UniqueRVArg);
Changed = IRAttribute::manifest(A);
} else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
// We can replace the returned value with the unique returned constant.
Value &AnchorValue = getAnchorValue();
if (Function *F = dyn_cast<Function>(&AnchorValue)) {
for (const Use &U : F->uses())
if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
if (CB->isCallee(&U)) {
Constant *RVCCast =
ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
}
} else {
assert(isa<CallBase>(AnchorValue) &&
"Expcected a function or call base anchor!");
Constant *RVCCast =
ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
}
if (Changed == ChangeStatus::CHANGED)
STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
"Number of function returns replaced by constant return");
}
return Changed;
}
const std::string AAReturnedValuesImpl::getAsStr() const {
return (isAtFixpoint() ? "returns(#" : "may-return(#") +
(isValidState() ? std::to_string(getNumReturnValues()) : "?") +
")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}
Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
// If checkForAllReturnedValues provides a unique value, ignoring potential
// undef values that can also be present, it is assumed to be the actual
// return value and forwarded to the caller of this method. If there are
// multiple, a nullptr is returned indicating there cannot be a unique
// returned value.
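// E.g., returned values {%a, undef} yield %a, while {%a, %b} yield nullptr.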
Optional<Value *> UniqueRV;
auto Pred = [&](Value &RV) -> bool {
// If we found a second returned value and neither the current nor the saved
// one is an undef, there is no unique returned value. Undefs are special
// since we can pretend they have any value.
if (UniqueRV.hasValue() && UniqueRV != &RV &&
!(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
UniqueRV = nullptr;
return false;
}
// Do not overwrite a value with an undef.
if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
UniqueRV = &RV;
return true;
};
if (!A.checkForAllReturnedValues(Pred, *this))
UniqueRV = nullptr;
return UniqueRV;
}
bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
&Pred) const {
if (!isValidState())
return false;
// Check all returned values but ignore call sites as long as we have not
// encountered an overdefined one during an update.
for (auto &It : ReturnedValues) {
Value *RV = It.first;
CallBase *CB = dyn_cast<CallBase>(RV);
if (CB && !UnresolvedCalls.count(CB))
continue;
if (!Pred(*RV, It.second))
return false;
}
return true;
}
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
size_t NumUnresolvedCalls = UnresolvedCalls.size();
bool Changed = false;
// State used in the value traversals starting in returned values.
struct RVState {
// The map in which we collect return values -> return instrs.
decltype(ReturnedValues) &RetValsMap;
// The flag to indicate a change.
bool &Changed;
// The return instrs we come from.
SmallSetVector<ReturnInst *, 4> RetInsts;
};
// Callback for a leaf value returned by the associated function.
auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
auto Size = RVS.RetValsMap[&Val].size();
RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
bool Inserted = RVS.RetValsMap[&Val].size() != Size;
RVS.Changed |= Inserted;
LLVM_DEBUG({
if (Inserted)
dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
<< " => " << RVS.RetInsts.size() << "\n";
});
return true;
};
// Helper method to invoke the generic value traversal.
auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
IRPosition RetValPos = IRPosition::value(RV);
return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
RVS, VisitValueCB);
};
// Callback for all "return intructions" live in the associated function.
auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
ReturnInst &Ret = cast<ReturnInst>(I);
RVState RVS({ReturnedValues, Changed, {}});
RVS.RetInsts.insert(&Ret);
return VisitReturnedValue(*Ret.getReturnValue(), RVS);
};
// Start by discovering returned values from all live return instructions in
// the associated function.
if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
return indicatePessimisticFixpoint();
// Once returned values "directly" present in the code are handled we try to
// resolve returned calls.
decltype(ReturnedValues) NewRVsMap;
for (auto &It : ReturnedValues) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
<< " by #" << It.second.size() << " RIs\n");
CallBase *CB = dyn_cast<CallBase>(It.first);
if (!CB || UnresolvedCalls.count(CB))
continue;
if (!CB->getCalledFunction()) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
<< "\n");
UnresolvedCalls.insert(CB);
continue;
}
// TODO: use the function scope once we have call site AAReturnedValues.
const auto &RetValAA = A.getAAFor<AAReturnedValues>(
*this, IRPosition::function(*CB->getCalledFunction()));
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
<< static_cast<const AbstractAttribute &>(RetValAA)
<< "\n");
// Skip dead ends: if we do not know anything about the returned
// call we mark it as unresolved and it will stay that way.
if (!RetValAA.getState().isValidState()) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
<< "\n");
UnresolvedCalls.insert(CB);
continue;
}
// Do not try to learn partial information. If the callee has unresolved
// return values we will treat the call as unresolved/opaque.
auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
if (!RetValAAUnresolvedCalls.empty()) {
UnresolvedCalls.insert(CB);
continue;
}
// Now check if we can track transitively returned values. If possible, that
// is if all returned values can be represented in the current scope, do so.
bool Unresolved = false;
for (auto &RetValAAIt : RetValAA.returned_values()) {
Value *RetVal = RetValAAIt.first;
if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
isa<Constant>(RetVal))
continue;
// Anything that did not fit in the above categories cannot be resolved,
// mark the call as unresolved.
LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
"cannot be translated: "
<< *RetVal << "\n");
UnresolvedCalls.insert(CB);
Unresolved = true;
break;
}
if (Unresolved)
continue;
// Now track transitively returned values.
unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
if (NumRetAA == RetValAA.getNumReturnValues()) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
"changed since it was seen last\n");
continue;
}
NumRetAA = RetValAA.getNumReturnValues();
for (auto &RetValAAIt : RetValAA.returned_values()) {
Value *RetVal = RetValAAIt.first;
if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
// Arguments are mapped to call site operands and we begin the traversal
// again.
bool Unused = false;
RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
continue;
} else if (isa<CallBase>(RetVal)) {
// Call sites are resolved by the callee attribute over time; no need for
// us to do anything.
continue;
} else if (isa<Constant>(RetVal)) {
// Constants are valid everywhere, we can simply take them.
NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
continue;
}
}
}
// To avoid modifications to the ReturnedValues map while we iterate over it
// we kept a record of potential new entries in a copy map, NewRVsMap.
for (auto &It : NewRVsMap) {
assert(!It.second.empty() && "Entry does not add anything.");
auto &ReturnInsts = ReturnedValues[It.first];
for (ReturnInst *RI : It.second)
if (ReturnInsts.insert(RI)) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
<< *It.first << " => " << *RI << "\n");
Changed = true;
}
}
Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
/// Returned values information for call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
llvm_unreachable("Abstract attributes for returned values are not "
"supported for call sites yet!");
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
return indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {}
};
/// ------------------------ NoSync Function Attribute -------------------------
struct AANoSyncImpl : AANoSync {
AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
const std::string getAsStr() const override {
return getAssumed() ? "nosync" : "may-sync";
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override;
/// Helper function used to determine whether an instruction is non-relaxed
/// atomic, that is, an atomic instruction whose ordering is neither unordered
/// nor monotonic.
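/// E.g., `load atomic i32, i32* %p seq_cst, align 4` is non-relaxed, while
/// `load atomic i32, i32* %p monotonic, align 4` is relaxed.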
static bool isNonRelaxedAtomic(Instruction *I);
/// Helper function used to determine whether an instruction is volatile.
static bool isVolatile(Instruction *I);
/// Helper function used to check if an intrinsic is nosync; currently only
/// the mem* intrinsics (memcpy, memmove, memset) are handled.
static bool isNoSyncIntrinsic(Instruction *I);
};
bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
if (!I->isAtomic())
return false;
AtomicOrdering Ordering;
switch (I->getOpcode()) {
case Instruction::AtomicRMW:
Ordering = cast<AtomicRMWInst>(I)->getOrdering();
break;
case Instruction::Store:
Ordering = cast<StoreInst>(I)->getOrdering();
break;
case Instruction::Load:
Ordering = cast<LoadInst>(I)->getOrdering();
break;
case Instruction::Fence: {
auto *FI = cast<FenceInst>(I);
if (FI->getSyncScopeID() == SyncScope::SingleThread)
return false;
Ordering = FI->getOrdering();
break;
}
case Instruction::AtomicCmpXchg: {
AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
// Only if both are relaxed can the operation be treated as relaxed.
// Otherwise it is non-relaxed.
if (Success != AtomicOrdering::Unordered &&
Success != AtomicOrdering::Monotonic)
return true;
if (Failure != AtomicOrdering::Unordered &&
Failure != AtomicOrdering::Monotonic)
return true;
return false;
}
default:
llvm_unreachable(
"New atomic operations need to be known in the attributor.");
}
// Relaxed.
if (Ordering == AtomicOrdering::Unordered ||
Ordering == AtomicOrdering::Monotonic)
return false;
return true;
}
/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
if (auto *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
/// Element-wise atomic memory intrinsics can only be unordered,
/// therefore nosync.
case Intrinsic::memset_element_unordered_atomic:
case Intrinsic::memmove_element_unordered_atomic:
case Intrinsic::memcpy_element_unordered_atomic:
return true;
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
if (!cast<MemIntrinsic>(II)->isVolatile())
return true;
return false;
default:
return false;
}
}
return false;
}
bool AANoSyncImpl::isVolatile(Instruction *I) {
assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
"Calls should not be checked here");
switch (I->getOpcode()) {
case Instruction::AtomicRMW:
return cast<AtomicRMWInst>(I)->isVolatile();
case Instruction::Store:
return cast<StoreInst>(I)->isVolatile();
case Instruction::Load:
return cast<LoadInst>(I)->isVolatile();
case Instruction::AtomicCmpXchg:
return cast<AtomicCmpXchgInst>(I)->isVolatile();
default:
return false;
}
}
ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
auto CheckRWInstForNoSync = [&](Instruction &I) {
/// We are looking for volatile instructions or non-relaxed atomics.
/// FIXME: We should improve the handling of intrinsics.
if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
return true;
if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
if (ICS.hasFnAttr(Attribute::NoSync))
return true;
const auto &NoSyncAA =
A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
if (NoSyncAA.isAssumedNoSync())
return true;
return false;
}
if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
return true;
return false;
};
auto CheckForNoSync = [&](Instruction &I) {
// At this point we handled all read/write effects and they are all
// nosync, so they can be skipped.
if (I.mayReadOrWriteMemory())
return true;
// non-convergent and readnone imply nosync.
return !ImmutableCallSite(&I).isConvergent();
};
if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
!A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
struct AANoSyncFunction final : public AANoSyncImpl {
AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
/// NoSync attribute deduction for call sites.
struct AANoSyncCallSite final : AANoSyncImpl {
AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoSyncImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call site arguments instead of
// redirecting requests to the callee argument.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
return clampStateAndIndicateChange(
getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
/// ------------------------ No-Free Attributes ----------------------------
struct AANoFreeImpl : public AANoFree {
AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForNoFree = [&](Instruction &I) {
ImmutableCallSite ICS(&I);
if (ICS.hasFnAttr(Attribute::NoFree))
return true;
const auto &NoFreeAA =
A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
return NoFreeAA.isAssumedNoFree();
};
if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return getAssumed() ? "nofree" : "may-free";
}
};
struct AANoFreeFunction final : public AANoFreeImpl {
AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
/// NoFree attribute deduction for call sites.
struct AANoFreeCallSite final : AANoFreeImpl {
AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoFreeImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call site arguments instead of
// redirecting requests to the callee argument.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
return clampStateAndIndicateChange(
getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};
/// ------------------------ NonNull Argument Attribute ------------------------
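/// Determine the known nonnull-ness and dereferenceable bytes implied by the
/// use \p U of \p AssociatedValue in the instruction \p I. \p IsNonNull is set
/// if the use implies the value must be nonnull, \p TrackUse if uses of the
/// user should be visited transitively as well. Returns the number of bytes
/// known to be dereferenceable through this use, or 0 if nothing is known.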
static int64_t getKnownNonNullAndDerefBytesForUse(
Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
TrackUse = false;
const Value *UseV = U->get();
if (!UseV->getType()->isPointerTy())
return 0;
Type *PtrTy = UseV->getType();
const Function *F = I->getFunction();
bool NullPointerIsDefined =
F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
const DataLayout &DL = A.getInfoCache().getDL();
if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
if (ICS.isBundleOperand(U))
return 0;
if (ICS.isCallee(U)) {
IsNonNull |= !NullPointerIsDefined;
return 0;
}
unsigned ArgNo = ICS.getArgumentNo(U);
IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP);
IsNonNull |= DerefAA.isKnownNonNull();
return DerefAA.getKnownDereferenceableBytes();
}
int64_t Offset;
if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
int64_t DerefBytes =
Offset + (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
IsNonNull |= !NullPointerIsDefined;
return DerefBytes;
}
}
if (const Value *Base =
GetPointerBaseWithConstantOffset(UseV, Offset, DL,
/*AllowNonInbounds*/ false)) {
auto &DerefAA =
A.getAAFor<AADereferenceable>(QueryingAA, IRPosition::value(*Base));
IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
return std::max(int64_t(0), DerefBytes - Offset);
}
return 0;
}
struct AANonNullImpl : AANonNull {
AANonNullImpl(const IRPosition &IRP)
: AANonNull(IRP),
NullIsDefined(NullPointerIsDefined(
getAnchorScope(),
getAssociatedValue().getType()->getPointerAddressSpace())) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
if (!NullIsDefined &&
hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
indicateOptimisticFixpoint();
else
AANonNull::initialize(A);
}
/// See AAFromMustBeExecutedContext
bool followUse(Attributor &A, const Use *U, const Instruction *I) {
bool IsNonNull = false;
bool TrackUse = false;
getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
IsNonNull, TrackUse);
takeKnownMaximum(IsNonNull);
return TrackUse;
}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return getAssumed() ? "nonnull" : "may-null";
}
/// Flag to determine if the underlying value can be null and still allow
/// valid accesses.
const bool NullIsDefined;
};
/// NonNull attribute for a floating value.
struct AANonNullFloating
: AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
Base::initialize(A);
if (isAtFixpoint())
return;
const IRPosition &IRP = getIRPosition();
const Value &V = IRP.getAssociatedValue();
const DataLayout &DL = A.getDataLayout();
// TODO: This context sensitive query should be removed once we can do
// context sensitive queries in the genericValueTraversal below.
if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
/* TODO: DT */ nullptr))
indicateOptimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
ChangeStatus Change = Base::updateImpl(A);
if (isKnownNonNull())
return Change;
if (!NullIsDefined) {
const auto &DerefAA = A.getAAFor<AADereferenceable>(*this, getIRPosition());
if (DerefAA.getAssumedDereferenceableBytes())
return Change;
}
const DataLayout &DL = A.getDataLayout();
auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
bool Stripped) -> bool {
const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
if (!Stripped && this == &AA) {
if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
/* CtxI */ getCtxI(),
/* TODO: DT */ nullptr))
T.indicatePessimisticFixpoint();
} else {
// Use abstract attribute information.
const AANonNull::StateType &NS =
static_cast<const AANonNull::StateType &>(AA.getState());
T ^= NS;
}
return T.isValidState();
};
StateType T;
if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
T, VisitValueCB))
return indicatePessimisticFixpoint();
return clampStateAndIndicateChange(getState(), T);
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nonnull) }
};
/// NonNull attribute for function return value.
struct AANonNullReturned final
: AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
AANonNullReturned(const IRPosition &IRP)
: AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
/// NonNull attribute for function argument.
struct AANonNullArgument final
: AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
AANonNullImpl> {
AANonNullArgument(const IRPosition &IRP)
: AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
AANonNullImpl>(
IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
struct AANonNullCallSiteArgument final : AANonNullFloating {
AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
: AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
AANonNullImpl> {
AANonNullCallSiteReturned(const IRPosition &IRP)
: AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
AANonNullImpl>(
IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
/// ------------------------ No-Recurse Attributes ----------------------------
struct AANoRecurseImpl : public AANoRecurse {
AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
/// See AbstractAttribute::getAsStr()
const std::string getAsStr() const override {
return getAssumed() ? "norecurse" : "may-recurse";
}
};
struct AANoRecurseFunction final : AANoRecurseImpl {
AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoRecurseImpl::initialize(A);
if (const Function *F = getAnchorScope())
if (A.getInfoCache().getSccSize(*F) == 1)
return;
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForNoRecurse = [&](Instruction &I) {
ImmutableCallSite ICS(&I);
if (ICS.hasFnAttr(Attribute::NoRecurse))
return true;
const auto &NoRecurseAA =
A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
if (!NoRecurseAA.isAssumedNoRecurse())
return false;
// A direct call to the function itself is recursion, even if the callee is
// otherwise assumed norecurse.
if (ICS.getCalledFunction() == getAnchorScope())
return false;
return true;
};
if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoRecurseImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
return clampStateAndIndicateChange(
getState(),
static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
/// ------------------------ Will-Return Attributes ----------------------------
// Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code.
static bool containsCycle(Function &F) {
SmallPtrSet<BasicBlock *, 32> Visited;
// Traverse the blocks in depth-first order and check whether a successor was
// already visited. Note that this also flags cross edges and is therefore a
// conservative over-approximation of "contains a cycle".
for (BasicBlock *BB : depth_first(&F)) {
Visited.insert(BB);
for (auto *SuccBB : successors(BB)) {
if (Visited.count(SuccBB))
return true;
}
}
return false;
}
// Helper function that checks whether the function has a loop which might
// become an endless loop.
// FIXME: Any cycle is regarded as endless loop for now.
// We have to allow some patterns.
static bool containsPossiblyEndlessLoop(Function *F) {
return !F || !F->hasExactDefinition() || containsCycle(*F);
}
struct AAWillReturnImpl : public AAWillReturn {
AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AAWillReturn::initialize(A);
Function *F = getAssociatedFunction();
if (containsPossiblyEndlessLoop(F))
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForWillReturn = [&](Instruction &I) {
IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
if (WillReturnAA.isKnownWillReturn())
return true;
if (!WillReturnAA.isAssumedWillReturn())
return false;
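// At this point willreturn is only assumed, not known. Require norecurse in
// addition so that cycles of optimistic willreturn assumptions cannot
// justify each other.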
const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
return NoRecurseAA.isAssumedNoRecurse();
};
if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::getAsStr()
const std::string getAsStr() const override {
return getAssumed() ? "willreturn" : "may-noreturn";
}
};
struct AAWillReturnFunction final : AAWillReturnImpl {
AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AAWillReturnImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::function(*F);
auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
return clampStateAndIndicateChange(
getState(),
static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};
/// ------------------------ NoAlias Argument Attribute ------------------------
struct AANoAliasImpl : AANoAlias {
AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
const std::string getAsStr() const override {
return getAssumed() ? "noalias" : "may-alias";
}
};
/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoAliasImpl::initialize(A);
Value &Val = getAssociatedValue();
if (isa<AllocaInst>(Val))
indicateOptimisticFixpoint();
if (isa<ConstantPointerNull>(Val) &&
Val.getType()->getPointerAddressSpace() == 0)
indicateOptimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Implement this.
return indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_FLOATING_ATTR(noalias)
}
};
/// NoAlias attribute for an argument.
struct AANoAliasArgument final
: AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
AANoAliasArgument(const IRPosition &IRP)
: AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};
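/// NoAlias attribute for a call site argument.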
struct AANoAliasCallSiteArgument final : AANoAliasImpl {
AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// paramHasAttr checks both the call site attributes and the callee's
// corresponding argument attribute.
ImmutableCallSite ICS(&getAnchorValue());
if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
indicateOptimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// We can deduce "noalias" if the following conditions hold.
// (i) Associated value is assumed to be noalias in the definition.
// (ii) Associated value is assumed to be no-capture in all the uses
// possibly executed before this callsite.
// (iii) There is no other pointer argument which could alias with the
// value.
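// For illustration (hypothetical IR): in
//   %a = alloca i32
//   call void @use(i32* %a, i32* @g)
// the first call site argument can be marked noalias since the alloca is
// noalias by definition, it is not captured before the call, and it cannot
// alias the unrelated global @g.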
const Value &V = getAssociatedValue();
const IRPosition IRP = IRPosition::value(V);
// (i) Check whether noalias holds in the definition.
auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
if (!NoAliasAA.isAssumedNoAlias())
return indicatePessimisticFixpoint();
LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
<< " is assumed NoAlias in the definition\n");
// (ii) Check whether the value is captured in the scope using AANoCapture.
// FIXME: This is conservative though, it is better to look at CFG and
// check only uses possibly executed before this callsite.
auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
LLVM_DEBUG(
dbgs() << "[Attributor][AANoAliasCSArg] " << V
<< " cannot be noalias as it is potentially captured\n");
return indicatePessimisticFixpoint();
}
// (iii) Check that no other pointer argument could alias with the value.
ImmutableCallSite ICS(&getAnchorValue());
for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
if (getArgNo() == (int)i)
continue;
const Value *ArgOp = ICS.getArgOperand(i);
if (!ArgOp->getType()->isPointerTy())
continue;
if (const Function *F = getAnchorScope()) {
if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
bool IsAliasing = !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
LLVM_DEBUG(dbgs()
<< "[Attributor][NoAliasCSArg] Check alias between "
"callsite arguments "
<< AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
<< getAssociatedValue() << " " << *ArgOp << " => "
<< (IsAliasing ? "" : "no-") << "alias\n");
if (!IsAliasing)
continue;
}
}
return indicatePessimisticFixpoint();
}
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};
/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckReturnValue = [&](Value &RV) -> bool {
if (Constant *C = dyn_cast<Constant>(&RV))
if (C->isNullValue() || isa<UndefValue>(C))
return true;
// For now, we can only deduce noalias if we have call sites.
// FIXME: add more support.
ImmutableCallSite ICS(&RV);
if (!ICS)
return false;
const IRPosition &RVPos = IRPosition::value(RV);
const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
if (!NoAliasAA.isAssumedNoAlias())
return false;
const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
};
if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
return indicatePessimisticFixpoint();
return ChangeStatus::UNCHANGED;
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};
/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoAliasImpl::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::returned(*F);
auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
return clampStateAndIndicateChange(
getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};
/// ------------------------ AAIsDead Function Attribute -----------------------
struct AAIsDeadImpl : public AAIsDead {
AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
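/// See AbstractAttribute::initialize(...).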
void initialize(Attributor &A) override {
const Function *F = getAssociatedFunction();
if (F && !F->isDeclaration())
exploreFromEntry(A, F);
}
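/// Explore all program points reachable from the entry block of \p F and
/// record the calls assumed to be noreturn along the way.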
void exploreFromEntry(Attributor &A, const Function *F) {
ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
if (const Instruction *NextNoReturnI =
findNextNoReturn(A, ToBeExploredPaths[i]))
NoReturnCalls.insert(NextNoReturnI);
// Mark the block live after we looked for no-return instructions.
assumeLive(A, F->getEntryBlock());
}
/// Find the next assumed noreturn instruction in the block of \p I starting
/// from, thus including, \p I.
///
/// The caller is responsible for monitoring the ToBeExploredPaths set as new
/// instructions discovered in other basic blocks will be placed there.
///
/// \returns The next assumed noreturn instruction in the block of \p I
/// starting from, thus including, \p I.
const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
std::to_string(NoReturnCalls.size()) + "]";
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override {
assert(getState().isValidState() &&
"Attempted to manifest an invalid state!");
ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
Function &F = *getAssociatedFunction();
if (AssumedLiveBlocks.empty()) {
A.deleteAfterManifest(F);
return ChangeStatus::CHANGED;
}
// Flag to determine if we can change an invoke to a call assuming the
// callee is nounwind. This is not possible if the personality of the
// function allows to catch asynchronous exceptions.
bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
for (const Instruction *NRC : NoReturnCalls) {
Instruction *I = const_cast<Instruction *>(NRC);
BasicBlock *BB = I->getParent();
Instruction *SplitPos = I->getNextNode();
// TODO: mark stuff before unreachable instructions as dead.
if (auto *II = dyn_cast<InvokeInst>(I)) {
// If we keep the invoke the split position is at the beginning of the
// normal destination block (it invokes a noreturn function after all).
BasicBlock *NormalDestBB = II->getNormalDest();
SplitPos = &NormalDestBB->front();
// Invoke is replaced with a call and unreachable is placed after it if
// the callee is nounwind and noreturn. Otherwise, we keep the invoke
// and only place an unreachable in the normal successor.
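// For illustration (hypothetical IR):
//   invoke void @noreturn_nounwind() to label %normal unwind label %lp
// becomes
//   call void @noreturn_nounwind()
//   unreachable
// whereas for a potentially unwinding callee only the edge to %normal is
// cut by placing an unreachable in the (split) normal destination.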
if (Invoke2CallAllowed) {
if (II->getCalledFunction()) {
const IRPosition &IPos = IRPosition::callsite_function(*II);
const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
if (AANoUnw.isAssumedNoUnwind()) {
LLVM_DEBUG(dbgs()
<< "[AAIsDead] Replace invoke with call inst\n");
// We do not need an invoke (II) but instead want a call followed
// by an unreachable. However, we do not remove II as other
// abstract attributes might have it cached as part of their
// results. Given that we modify the CFG anyway, we simply keep II
// around but in a new dead block. To avoid II being live through
// a different edge we have to ensure the block we place it in is
// only reached from the current block of II and then not reached
// at all when we insert the unreachable.
SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
CallInst *CI = createCallMatchingInvoke(II);
CI->insertBefore(II);
CI->takeName(II);
II->replaceAllUsesWith(CI);
SplitPos = CI->getNextNode();
}
}
}
if (SplitPos == &NormalDestBB->front()) {
// If this is an invoke of a noreturn function the edge to the normal
// destination block is dead but not necessarily the block itself.
// TODO: We need to move to an edge based system during deduction and
// also manifest.
assert(!NormalDestBB->isLandingPad() &&
"Expected the normal destination not to be a landingpad!");
if (NormalDestBB->getUniquePredecessor() == BB) {
assumeLive(A, *NormalDestBB);
} else {
BasicBlock *SplitBB =
SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
// The split block is live even if it contains only an unreachable
// instruction at the end.
assumeLive(A, *SplitBB);
SplitPos = SplitBB->getTerminator();
HasChanged = ChangeStatus::CHANGED;
}
}
}
if (isa_and_nonnull<UnreachableInst>(SplitPos))
continue;
BB = SplitPos->getParent();
SplitBlock(BB, SplitPos);
changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
HasChanged = ChangeStatus::CHANGED;
}
for (BasicBlock &BB : F)
if (!AssumedLiveBlocks.count(&BB))
A.deleteAfterManifest(BB);
return HasChanged;
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override;
/// See AAIsDead::isAssumedDead(BasicBlock *).
bool isAssumedDead(const BasicBlock *BB) const override {
assert(BB->getParent() == getAssociatedFunction() &&
"BB must be in the same anchor scope function.");
if (!getAssumed())
return false;
return !AssumedLiveBlocks.count(BB);
}
/// See AAIsDead::isKnownDead(BasicBlock *).
bool isKnownDead(const BasicBlock *BB) const override {
return getKnown() && isAssumedDead(BB);
}
/// See AAIsDead::isAssumed(Instruction *I).
bool isAssumedDead(const Instruction *I) const override {
assert(I->getParent()->getParent() == getAssociatedFunction() &&
"Instruction must be in the same anchor scope function.");
if (!getAssumed())
return false;
// If it is not in AssumedLiveBlocks then it is for sure dead.
// Otherwise, it can still be after a noreturn call in a live block.
if (!AssumedLiveBlocks.count(I->getParent()))
return true;
// If it is not after a noreturn call, then it is live.
return isAfterNoReturn(I);
}
/// See AAIsDead::isKnownDead(Instruction *I).
bool isKnownDead(const Instruction *I) const override {
return getKnown() && isAssumedDead(I);
}
/// Check if the instruction is after a noreturn call, in other words, assumed
/// dead.
bool isAfterNoReturn(const Instruction *I) const;
/// Determine if \p F might catch asynchronous exceptions.
static bool mayCatchAsynchronousExceptions(const Function &F) {
return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
}
/// Assume \p BB is (partially) live now and indicate to the Attributor \p A
/// that internal functions called from \p BB should now be looked at.
void assumeLive(Attributor &A, const BasicBlock &BB) {
if (!AssumedLiveBlocks.insert(&BB).second)
return;
// We assume that all of BB is (probably) live now and if there are calls to
// internal functions we will assume that those are now live as well. This
// is a performance optimization for blocks with calls to a lot of internal
// functions. It can however cause dead functions to be treated as live.
for (const Instruction &I : BB)
if (ImmutableCallSite ICS = ImmutableCallSite(&I))
if (const Function *F = ICS.getCalledFunction())
if (F->hasLocalLinkage())
A.markLiveInternalFunction(*F);
}
/// Collection of to be explored paths.
SmallSetVector<const Instruction *, 8> ToBeExploredPaths;
/// Collection of all assumed live BasicBlocks.
DenseSet<const BasicBlock *> AssumedLiveBlocks;
/// Collection of calls with noreturn attribute, assumed or known.
SmallSetVector<const Instruction *, 4> NoReturnCalls;
};
struct AAIsDeadFunction final : public AAIsDeadImpl {
AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECL(PartiallyDeadBlocks, Function,
"Number of basic blocks classified as partially dead");
BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
}
};
bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
const Instruction *PrevI = I->getPrevNode();
while (PrevI) {
if (NoReturnCalls.count(PrevI))
return true;
PrevI = PrevI->getPrevNode();
}
return false;
}
const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
const Instruction *I) {
const BasicBlock *BB = I->getParent();
const Function &F = *BB->getParent();
// Flag to determine if we can change an invoke to a call assuming the callee
// is nounwind. This is not possible if the personality of the function allows
// to catch asynchronous exceptions.
bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
// TODO: We should have a function that determines if an "edge" is dead.
// Edges could be from an instruction to the next or from a terminator
// to the successor. For now, we need to special case the unwind block
// of InvokeInst below.
while (I) {
ImmutableCallSite ICS(I);
if (ICS) {
const IRPosition &IPos = IRPosition::callsite_function(ICS);
// Regardless of the no-return property of an invoke instruction we only
// learn that the regular successor is not reachable through this
// instruction but the unwind block might still be.
if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
// Use nounwind to justify the unwind block is dead as well.
const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
assumeLive(A, *Invoke->getUnwindDest());
ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
}
}
const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
if (NoReturnAA.isAssumedNoReturn())
return I;
}
I = I->getNextNode();
}
// Get new paths (reachable blocks).
for (const BasicBlock *SuccBB : successors(BB)) {
assumeLive(A, *SuccBB);
ToBeExploredPaths.insert(&SuccBB->front());
}
// No noreturn instruction found.
return nullptr;
}
ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
ChangeStatus Status = ChangeStatus::UNCHANGED;
// Temporary collection to iterate over existing noreturn instructions. This
// will allow easier modification of the NoReturnCalls collection.
SmallVector<const Instruction *, 8> NoReturnChanged;
for (const Instruction *I : NoReturnCalls)
NoReturnChanged.push_back(I);
for (const Instruction *I : NoReturnChanged) {
size_t Size = ToBeExploredPaths.size();
const Instruction *NextNoReturnI = findNextNoReturn(A, I);
if (NextNoReturnI != I) {
Status = ChangeStatus::CHANGED;
NoReturnCalls.remove(I);
if (NextNoReturnI)
NoReturnCalls.insert(NextNoReturnI);
}
// Explore new paths.
while (Size != ToBeExploredPaths.size()) {
Status = ChangeStatus::CHANGED;
if (const Instruction *NextNoReturnI =
findNextNoReturn(A, ToBeExploredPaths[Size++]))
NoReturnCalls.insert(NextNoReturnI);
}
}
LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
<< AssumedLiveBlocks.size() << " Total number of blocks: "
<< getAssociatedFunction()->size() << "\n");
// If we know everything is live there is no need to query for liveness.
if (NoReturnCalls.empty() &&
getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
// Indicating a pessimistic fixpoint will cause the state to be "invalid"
// which will cause the Attributor to not return the AAIsDead on request,
// which will prevent us from querying isAssumedDead().
indicatePessimisticFixpoint();
assert(!isValidState() && "Expected an invalid state!");
Status = ChangeStatus::CHANGED;
}
return Status;
}
/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadImpl {
AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
llvm_unreachable("Abstract attributes for liveness are not "
"supported for call sites yet!");
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
return indicatePessimisticFixpoint();
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {}
};
/// -------------------- Dereferenceable Argument Attribute --------------------
template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
const DerefState &R) {
ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
S.DerefBytesState, R.DerefBytesState);
ChangeStatus CS1 =
clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
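// Note: operator| on ChangeStatus yields CHANGED if either operand is CHANGED.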
return CS0 | CS1;
}
struct AADereferenceableImpl : AADereferenceable {
AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
using StateType = DerefState;
void initialize(Attributor &A) override {
SmallVector<Attribute, 4> Attrs;
getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
Attrs);
for (const Attribute &Attr : Attrs)
takeKnownDerefBytesMaximum(Attr.getValueAsInt());
NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
const IRPosition &IRP = this->getIRPosition();
bool IsFnInterface = IRP.isFnInterfaceKind();
const Function *FnScope = IRP.getAnchorScope();
if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::getState()
/// {
StateType &getState() override { return *this; }
const StateType &getState() const override { return *this; }
/// }
/// See AAFromMustBeExecutedContext
bool followUse(Attributor &A, const Use *U, const Instruction *I) {
bool IsNonNull = false;
bool TrackUse = false;
int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
takeKnownDerefBytesMaximum(DerefBytes);
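// TrackUse signals to the must-be-executed-context traversal whether the
// uses of \p I should be followed as well.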
return TrackUse;
}
void getDeducedAttributes(LLVMContext &Ctx,
SmallVectorImpl<Attribute> &Attrs) const override {
// TODO: Add *_globally support
if (isAssumedNonNull())
Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
Ctx, getAssumedDereferenceableBytes()));
else
Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
Ctx, getAssumedDereferenceableBytes()));
}
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
if (!getAssumedDereferenceableBytes())
return "unknown-dereferenceable";
return std::string("dereferenceable") +
(isAssumedNonNull() ? "" : "_or_null") +
(isAssumedGlobal() ? "_globally" : "") + "<" +
std::to_string(getKnownDereferenceableBytes()) + "-" +
std::to_string(getAssumedDereferenceableBytes()) + ">";
}
};
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating
: AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
using Base =
AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
ChangeStatus Change = Base::updateImpl(A);
const DataLayout &DL = A.getDataLayout();
auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
unsigned IdxWidth =
DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
APInt Offset(IdxWidth, 0);
const Value *Base =
V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
const auto &AA =
A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
int64_t DerefBytes = 0;
if (!Stripped && this == &AA) {
// Use IR information if we did not strip anything.
// TODO: track globally.
bool CanBeNull;
DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
T.GlobalState.indicatePessimisticFixpoint();
} else {
const DerefState &DS = static_cast<const DerefState &>(AA.getState());
DerefBytes = DS.DerefBytesState.getAssumed();
T.GlobalState &= DS.GlobalState;
}
// For now we do not try to "increase" dereferenceability due to negative
// indices as we first have to come up with code to deal with loops and
// for overflows of the dereferenceable bytes.
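// For illustration (hypothetical IR): for
//   %q = getelementptr i8, i8* %p, i64 -8
// the offset is clamped to zero, i.e., we do not credit %q with more
// dereferenceable bytes than %p itself.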
int64_t OffsetSExt = Offset.getSExtValue();
if (OffsetSExt < 0)
OffsetSExt = 0;
T.takeAssumedDerefBytesMinimum(
std::max(int64_t(0), DerefBytes - OffsetSExt));
if (this == &AA) {
if (!Stripped) {
// If nothing was stripped IR information is all we got.
T.takeKnownDerefBytesMaximum(
std::max(int64_t(0), DerefBytes - OffsetSExt));
T.indicatePessimisticFixpoint();
} else if (OffsetSExt > 0) {
// If something was stripped but there is circular reasoning we look
// at the offset. If it is positive we basically decrease the
// dereferenceable bytes in a circular loop now, which would simply
// drive them down to the known value very slowly. Taking the
// pessimistic fixpoint right away short-circuits that.
T.indicatePessimisticFixpoint();
}
}
return T.isValidState();
};
DerefState T;
if (!genericValueTraversal<AADereferenceable, DerefState>(
A, getIRPosition(), *this, T, VisitValueCB))
return indicatePessimisticFixpoint();
return Change | clampStateAndIndicateChange(getState(), T);
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
}
};
/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
: AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
DerefState> {
AADereferenceableReturned(const IRPosition &IRP)
: AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
DerefState>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
}
};
/// Dereferenceable attribute for an argument
struct AADereferenceableArgument final
: AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
AADereferenceable, AADereferenceableImpl, DerefState> {
using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
AADereferenceable, AADereferenceableImpl, DerefState>;
AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_ARG_ATTR(dereferenceable)
}
};
/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
AADereferenceableCallSiteArgument(const IRPosition &IRP)
: AADereferenceableFloating(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
}
};
/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final
: AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
AADereferenceable, AADereferenceableImpl> {
using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
AADereferenceable, AADereferenceableImpl>;
AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
Base::initialize(A);
Function *F = getAssociatedFunction();
if (!F)
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
// TODO: Once we have call site specific value information we can provide
// call site specific liveness information and then it makes
// sense to specialize attributes for call sites instead of
// redirecting requests to the callee.
ChangeStatus Change = Base::updateImpl(A);
Function *F = getAssociatedFunction();
const IRPosition &FnPos = IRPosition::returned(*F);
auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
return Change |
clampStateAndIndicateChange(
getState(), static_cast<const DerefState &>(FnAA.getState()));
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_CS_ATTR(dereferenceable);
}
};
// ------------------------ Align Argument Attribute ------------------------
struct AAAlignImpl : AAAlign {
AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
// Maximum alignment value allowed in IR.
static const unsigned MAX_ALIGN = 1U << 29;
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
takeAssumedMinimum(MAX_ALIGN);
SmallVector<Attribute, 4> Attrs;
getAttrs({Attribute::Alignment}, Attrs);
for (const Attribute &Attr : Attrs)
takeKnownMaximum(Attr.getValueAsInt());
if (getIRPosition().isFnInterfaceKind() &&
(!getAssociatedFunction() ||
!getAssociatedFunction()->hasExactDefinition()))
indicatePessimisticFixpoint();
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
// Check for users that allow alignment annotations.
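// For illustration (hypothetical IR): a user `store i32 0, i32* %p, align 4`
// can be rewritten to use `align 16` if 16 is the assumed alignment of %p.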
Value &AnchorVal = getIRPosition().getAnchorValue();
for (const Use &U : AnchorVal.uses()) {
if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
if (SI->getPointerOperand() == &AnchorVal)
if (SI->getAlignment() < getAssumedAlign()) {
STATS_DECLTRACK(AAAlign, Store,
"Number of times alignemnt added to a store");
SI->setAlignment(Align(getAssumedAlign()));
Changed = ChangeStatus::CHANGED;
}
} else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
if (LI->getPointerOperand() == &AnchorVal)
if (LI->getAlignment() < getAssumedAlign()) {
LI->setAlignment(Align(getAssumedAlign()));
STATS_DECLTRACK(AAAlign, Load,
"Number of times alignemnt added to a load");
Changed = ChangeStatus::CHANGED;
}
}
}
return AAAlign::manifest(A) | Changed;
}
// TODO: Provide a helper to determine the implied ABI alignment and check
// that value in the existing manifest method and in a new one for
// AAAlignImpl, to avoid making the alignment explicit if it did not improve.
/// See AbstractAttribute::getDeducedAttributes
void
getDeducedAttributes(LLVMContext &Ctx,
SmallVectorImpl<Attribute> &Attrs) const override {