//===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SampleProfileLoader transformation. This pass
// reads a profile file generated by a sampling profiler (e.g. Linux Perf -
// http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
// profile information it contains.
//
// This pass generates branch weight annotations on the IR:
//
// - prof: Represents branch weights. This annotation is added to branches
// to indicate the weights of each edge coming out of the branch.
// The weight of each edge is the weight of the target block for
// that edge. The weight of a block B is computed as the maximum
// number of samples found in B.
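//
//         For example (illustrative block names), a conditional branch
//         whose two successor blocks were sampled 100 and 10 times would
//         end up annotated roughly as:
//
//           br i1 %cmp, label %if.then, label %if.else, !prof !1
//           !1 = !{!"branch_weights", i32 101, i32 11}
//
//         (each emitted weight is the successor block's weight plus one;
//         see propagateWeights() below).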
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/SampleProfile.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <cctype>
using namespace llvm;
using namespace sampleprof;
#define DEBUG_TYPE "sample-profile"
// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
"sample-profile-file", cl::init(""), cl::value_desc("filename"),
cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
"sample-profile-max-propagate-iterations", cl::init(100),
cl::desc("Maximum number of iterations to go through when propagating "
"sample block/edge weights through the CFG."));
static cl::opt<unsigned> SampleProfileRecordCoverage(
"sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
cl::desc("Emit a warning if less than N% of records in the input profile "
"are matched to the IR."));
static cl::opt<unsigned> SampleProfileSampleCoverage(
"sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
cl::desc("Emit a warning if less than N% of samples in the input profile "
"are matched to the IR."));
static cl::opt<double> SampleProfileHotThreshold(
"sample-profile-inline-hot-threshold", cl::init(0.1), cl::value_desc("N"),
cl::desc("Inlined functions that account for more than N% of all samples "
"collected in the parent function, will be inlined again."));
namespace {
typedef DenseMap<const BasicBlock *, uint64_t> BlockWeightMap;
typedef DenseMap<const BasicBlock *, const BasicBlock *> EquivalenceClassMap;
typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
typedef DenseMap<Edge, uint64_t> EdgeWeightMap;
typedef DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>
BlockEdgeMap;
class SampleCoverageTracker {
public:
SampleCoverageTracker() : SampleCoverage(), TotalUsedSamples(0) {}
bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
uint32_t Discriminator, uint64_t Samples);
unsigned computeCoverage(unsigned Used, unsigned Total) const;
unsigned countUsedRecords(const FunctionSamples *FS) const;
unsigned countBodyRecords(const FunctionSamples *FS) const;
uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
uint64_t countBodySamples(const FunctionSamples *FS) const;
void clear() {
SampleCoverage.clear();
TotalUsedSamples = 0;
}
private:
typedef std::map<LineLocation, unsigned> BodySampleCoverageMap;
typedef DenseMap<const FunctionSamples *, BodySampleCoverageMap>
FunctionSamplesCoverageMap;
/// Coverage map for sampling records.
///
/// This map keeps a record of sampling records that have been matched to
/// an IR instruction. This is used to detect some form of staleness in
/// profiles (see flag -sample-profile-check-coverage).
///
/// Each entry in the map corresponds to a FunctionSamples instance. Its
/// value is another map that counts how many times the sample record at
/// a given location has been used.
FunctionSamplesCoverageMap SampleCoverage;
/// Number of samples used from the profile.
///
/// When a sampling record is used for the first time, the samples from
/// that record are added to this accumulator. Coverage is later computed
/// based on the total number of samples available in this function and
/// its callsites.
///
/// Note that this accumulator tracks samples used from a single function
/// and all the inlined callsites. Strictly, we should have a map of counters
/// keyed by FunctionSamples pointers, but these stats are cleared after
/// every function, so we just need to keep a single counter.
uint64_t TotalUsedSamples;
};
/// \brief Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader {
public:
SampleProfileLoader(StringRef Name = SampleProfileFile)
: DT(nullptr), PDT(nullptr), LI(nullptr), ACT(nullptr), Reader(),
Samples(nullptr), Filename(Name), ProfileIsValid(false),
TotalCollectedSamples(0) {}
bool doInitialization(Module &M);
bool runOnModule(Module &M);
void setACT(AssumptionCacheTracker *A) { ACT = A; }
void dump() { Reader->dump(); }
protected:
bool runOnFunction(Function &F);
unsigned getFunctionLoc(Function &F);
bool emitAnnotations(Function &F);
ErrorOr<uint64_t> getInstWeight(const Instruction &I);
ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
const FunctionSamples *findCalleeFunctionSamples(const Instruction &I) const;
std::vector<const FunctionSamples *>
findIndirectCallFunctionSamples(const Instruction &I) const;
const FunctionSamples *findFunctionSamples(const Instruction &I) const;
bool inlineHotFunctions(Function &F,
DenseSet<GlobalValue::GUID> &ImportGUIDs);
void printEdgeWeight(raw_ostream &OS, Edge E);
void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
bool computeBlockWeights(Function &F);
void findEquivalenceClasses(Function &F);
template <bool IsPostDom>
void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);
void propagateWeights(Function &F);
uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
void buildEdges(Function &F);
bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
void computeDominanceAndLoopInfo(Function &F);
unsigned getOffset(const DILocation *DIL) const;
void clearFunctionData();
/// \brief Map basic blocks to their computed weights.
///
/// The weight of a basic block is defined to be the maximum
/// of all the instruction weights in that block.
BlockWeightMap BlockWeights;
/// \brief Map edges to their computed weights.
///
/// Edge weights are computed by propagating basic block weights in
/// SampleProfile::propagateWeights.
EdgeWeightMap EdgeWeights;
/// \brief Set of visited blocks during propagation.
SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;
/// \brief Set of visited edges during propagation.
SmallSet<Edge, 32> VisitedEdges;
/// \brief Equivalence classes for block weights.
///
/// Two blocks BB1 and BB2 are in the same equivalence class if they
/// dominate and post-dominate each other, and they are in the same loop
/// nest. When this happens, the two blocks are guaranteed to execute
/// the same number of times.
EquivalenceClassMap EquivalenceClass;
/// Map from function name to Function *. Used to find a function from its
/// name. If the function name contains a suffix, an additional entry is
/// added to map the stripped name to the function, as long as that mapping
/// is one-to-one.
StringMap<Function *> SymbolMap;
/// \brief Dominance, post-dominance and loop information.
std::unique_ptr<DominatorTree> DT;
std::unique_ptr<PostDomTreeBase<BasicBlock>> PDT;
std::unique_ptr<LoopInfo> LI;
AssumptionCacheTracker *ACT;
/// \brief Predecessors for each basic block in the CFG.
BlockEdgeMap Predecessors;
/// \brief Successors for each basic block in the CFG.
BlockEdgeMap Successors;
SampleCoverageTracker CoverageTracker;
/// \brief Profile reader object.
std::unique_ptr<SampleProfileReader> Reader;
/// \brief Samples collected for the body of this function.
FunctionSamples *Samples;
/// \brief Name of the profile file to load.
std::string Filename;
/// \brief Flag indicating whether the profile input loaded successfully.
bool ProfileIsValid;
/// \brief Total number of samples collected in this profile.
///
/// This is the sum of all the samples collected in all the functions executed
/// at runtime.
uint64_t TotalCollectedSamples;
};
class SampleProfileLoaderLegacyPass : public ModulePass {
public:
// Class identification, replacement for typeinfo
static char ID;
SampleProfileLoaderLegacyPass(StringRef Name = SampleProfileFile)
: ModulePass(ID), SampleLoader(Name) {
initializeSampleProfileLoaderLegacyPassPass(
*PassRegistry::getPassRegistry());
}
void dump() { SampleLoader.dump(); }
bool doInitialization(Module &M) override {
return SampleLoader.doInitialization(M);
}
StringRef getPassName() const override { return "Sample profile pass"; }
bool runOnModule(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
}
private:
SampleProfileLoader SampleLoader;
};
/// Return true if the given callsite is hot with respect to its caller.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compute the fraction
/// of samples used by the callsite with respect to the total number of samples
/// collected in the caller.
///
/// If that fraction is larger than the default given by
/// SampleProfileHotThreshold, the callsite will be inlined again.
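///
/// For example, with the default threshold of 0.1, a callsite with 50
/// samples inside a caller that collected 10,000 samples in total accounts
/// for 0.5% of the caller's samples and is therefore considered hot.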
bool callsiteIsHot(const FunctionSamples *CallerFS,
const FunctionSamples *CallsiteFS) {
if (!CallsiteFS)
return false; // The callsite was not inlined in the original binary.
uint64_t ParentTotalSamples = CallerFS->getTotalSamples();
if (ParentTotalSamples == 0)
return false; // Avoid division by zero.
uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
if (CallsiteTotalSamples == 0)
return false; // Callsite is trivially cold.
double PercentSamples =
(double)CallsiteTotalSamples / (double)ParentTotalSamples * 100.0;
return PercentSamples >= SampleProfileHotThreshold;
}
}
/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
uint32_t LineOffset,
uint32_t Discriminator,
uint64_t Samples) {
LineLocation Loc(LineOffset, Discriminator);
unsigned &Count = SampleCoverage[FS][Loc];
bool FirstTime = (++Count == 1);
if (FirstTime)
TotalUsedSamples += Samples;
return FirstTime;
}
/// Return the number of sample records that were applied from this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS) const {
auto I = SampleCoverage.find(FS);
// The size of the coverage map for FS represents the number of records
// that were marked used at least once.
unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;
// If there are inlined callsites in this function, count the records in
// their bodies as well. However, do not bother counting callees with 0
// total samples; these are callees that were never invoked at runtime.
for (const auto &I : FS->getCallsiteSamples())
for (const auto &J : I.second) {
const FunctionSamples *CalleeSamples = &J.second;
if (callsiteIsHot(FS, CalleeSamples))
Count += countUsedRecords(CalleeSamples);
}
return Count;
}
/// Return the number of sample records in the body of this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS) const {
unsigned Count = FS->getBodySamples().size();
// Only count records in hot callsites.
for (const auto &I : FS->getCallsiteSamples())
for (const auto &J : I.second) {
const FunctionSamples *CalleeSamples = &J.second;
if (callsiteIsHot(FS, CalleeSamples))
Count += countBodyRecords(CalleeSamples);
}
return Count;
}
/// Return the number of samples collected in the body of this profile.
///
/// This count does not include samples from cold inlined callsites.
uint64_t
SampleCoverageTracker::countBodySamples(const FunctionSamples *FS) const {
uint64_t Total = 0;
for (const auto &I : FS->getBodySamples())
Total += I.second.getSamples();
// Only count samples in hot callsites.
for (const auto &I : FS->getCallsiteSamples())
for (const auto &J : I.second) {
const FunctionSamples *CalleeSamples = &J.second;
if (callsiteIsHot(FS, CalleeSamples))
Total += countBodySamples(CalleeSamples);
}
return Total;
}
/// Return the fraction of sample records used in this profile.
///
/// The returned value is an unsigned integer in the range 0-100 indicating
/// the percentage of sample records that were used while applying this
/// profile to the associated function.
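///
/// For example, if 45 out of 60 records were used, the returned coverage
/// is 45 * 100 / 60 = 75.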
unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
unsigned Total) const {
assert(Used <= Total &&
"number of used records cannot exceed the total number of records");
return Total > 0 ? Used * 100 / Total : 100;
}
/// Clear all the per-function data used to load samples and propagate weights.
void SampleProfileLoader::clearFunctionData() {
BlockWeights.clear();
EdgeWeights.clear();
VisitedBlocks.clear();
VisitedEdges.clear();
EquivalenceClass.clear();
DT = nullptr;
PDT = nullptr;
LI = nullptr;
Predecessors.clear();
Successors.clear();
CoverageTracker.clear();
}
/// Returns the line offset to the start line of the subprogram.
/// We assume that a single function will not exceed 65535 LOC.
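/// For example, if the enclosing subprogram starts at line 1000 and \p DIL
/// refers to line 1014, the returned offset is 14.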
unsigned SampleProfileLoader::getOffset(const DILocation *DIL) const {
return (DIL->getLine() - DIL->getScope()->getSubprogram()->getLine()) &
0xffff;
}
/// \brief Print the weight of edge \p E on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param E Edge to print.
void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
OS << "weight[" << E.first->getName() << "->" << E.second->getName()
<< "]: " << EdgeWeights[E] << "\n";
}
/// \brief Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
const BasicBlock *BB) {
const BasicBlock *Equiv = EquivalenceClass[BB];
OS << "equivalence[" << BB->getName()
<< "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}
/// \brief Print the weight of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
const BasicBlock *BB) const {
const auto &I = BlockWeights.find(BB);
uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
/// \brief Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function (see getOffset()). We then look up the samples collected for
/// \p Inst in the function's body samples at that offset.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
const DebugLoc &DLoc = Inst.getDebugLoc();
if (!DLoc)
return std::error_code();
const FunctionSamples *FS = findFunctionSamples(Inst);
if (!FS)
return std::error_code();
// Ignore all intrinsics and branch instructions.
// Branch instructions usually contain debug info from source locations
// outside of their enclosing basic block, so we ignore them during annotation.
if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst))
return std::error_code();
// If a call/invoke instruction is inlined in the profile but not inlined
// here, the inlined callsite has no samples at this location, so the call
// instruction should get a count of 0.
if ((isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) &&
findCalleeFunctionSamples(Inst))
return 0;
const DILocation *DIL = DLoc;
uint32_t LineOffset = getOffset(DIL);
uint32_t Discriminator = DIL->getBaseDiscriminator();
ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
if (R) {
bool FirstMark =
CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
if (FirstMark) {
const Function *F = Inst.getParent()->getParent();
LLVMContext &Ctx = F->getContext();
emitOptimizationRemark(
Ctx, DEBUG_TYPE, *F, DLoc,
Twine("Applied ") + Twine(*R) +
" samples from profile (offset: " + Twine(LineOffset) +
((Discriminator) ? Twine(".") + Twine(Discriminator) : "") + ")");
}
DEBUG(dbgs() << " " << DLoc.getLine() << "."
<< DIL->getBaseDiscriminator() << ":" << Inst
<< " (line offset: " << LineOffset << "."
<< DIL->getBaseDiscriminator() << " - weight: " << R.get()
<< ")\n");
}
return R;
}
/// \brief Compute the weight of a basic block.
///
/// The weight of basic block \p BB is the maximum weight of all the
/// instructions in BB.
///
/// \param BB The basic block to query.
///
/// \returns the weight for \p BB.
ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
uint64_t Max = 0;
bool HasWeight = false;
for (auto &I : BB->getInstList()) {
const ErrorOr<uint64_t> &R = getInstWeight(I);
if (R) {
Max = std::max(Max, R.get());
HasWeight = true;
}
}
return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
}
/// \brief Compute and store the weights of every basic block.
///
/// This populates the BlockWeights map by computing
/// the weights of every basic block in the CFG.
///
/// \param F The function to query.
bool SampleProfileLoader::computeBlockWeights(Function &F) {
bool Changed = false;
DEBUG(dbgs() << "Block weights\n");
for (const auto &BB : F) {
ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
if (Weight) {
BlockWeights[&BB] = Weight.get();
VisitedBlocks.insert(&BB);
Changed = true;
}
DEBUG(printBlockWeight(dbgs(), &BB));
}
return Changed;
}
/// \brief Get the FunctionSamples for a call instruction.
///
/// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance that the call instruction calls into. It contains all samples
/// that reside in that inlined instance. We first find the inlined instance
/// the call instruction comes from, then traverse its children to find the
/// callsite with the matching location.
///
/// \param Inst Call/Invoke instruction to query.
///
/// \returns The FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const Instruction &Inst) const {
const DILocation *DIL = Inst.getDebugLoc();
if (!DIL) {
return nullptr;
}
StringRef CalleeName;
if (const CallInst *CI = dyn_cast<CallInst>(&Inst))
if (Function *Callee = CI->getCalledFunction())
CalleeName = Callee->getName();
const FunctionSamples *FS = findFunctionSamples(Inst);
if (FS == nullptr)
return nullptr;
return FS->findFunctionSamplesAt(
LineLocation(getOffset(DIL), DIL->getBaseDiscriminator()), CalleeName);
}
/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
const Instruction &Inst) const {
const DILocation *DIL = Inst.getDebugLoc();
std::vector<const FunctionSamples *> R;
if (!DIL) {
return R;
}
const FunctionSamples *FS = findFunctionSamples(Inst);
if (FS == nullptr)
return R;
if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(
LineLocation(getOffset(DIL), DIL->getBaseDiscriminator()))) {
if (M->size() == 0)
return R;
for (const auto &NameFS : *M) {
R.push_back(&NameFS.second);
}
std::sort(R.begin(), R.end(),
[](const FunctionSamples *L, const FunctionSamples *R) {
return L->getTotalSamples() > R->getTotalSamples();
});
}
return R;
}
/// \brief Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// that instruction comes from. We traverse the inline stack of that
/// instruction and match it with the tree nodes in the profile.
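///
/// For example, if \p Inst carries a debug location showing it was inlined
/// from a call to bar() in the function being annotated, the lookup descends
/// from this function's FunctionSamples into the callsite record for bar()
/// and returns bar()'s inlined FunctionSamples.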
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
SmallVector<std::pair<LineLocation, StringRef>, 10> S;
const DILocation *DIL = Inst.getDebugLoc();
if (!DIL)
return Samples;
const DILocation *PrevDIL = DIL;
for (DIL = DIL->getInlinedAt(); DIL; DIL = DIL->getInlinedAt()) {
S.push_back(std::make_pair(
LineLocation(getOffset(DIL), DIL->getBaseDiscriminator()),
PrevDIL->getScope()->getSubprogram()->getLinkageName()));
PrevDIL = DIL;
}
if (S.size() == 0)
return Samples;
const FunctionSamples *FS = Samples;
for (int i = S.size() - 1; i >= 0 && FS != nullptr; i--) {
FS = FS->findFunctionSamplesAt(S[i].first, S[i].second);
}
return FS;
}
/// \brief Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and determine
/// whether the corresponding inlined instance exists and is hot in the
/// profile. If it is hot enough, inline the callsite, which in turn adds the
/// callee's callsites into the caller. If the call is an indirect call,
/// first promote it to a direct call. Each indirect call is limited to a
/// single promoted target.
///
/// \param F function to perform iterative inlining.
/// \param ImportGUIDs a set to be updated to include all GUIDs that come
/// from a different module but inlined in the profiled binary.
///
/// \returns True if any inlining happened.
bool SampleProfileLoader::inlineHotFunctions(
Function &F, DenseSet<GlobalValue::GUID> &ImportGUIDs) {
DenseSet<Instruction *> PromotedInsns;
bool Changed = false;
LLVMContext &Ctx = F.getContext();
std::function<AssumptionCache &(Function &)> GetAssumptionCache = [&](
Function &F) -> AssumptionCache & { return ACT->getAssumptionCache(F); };
while (true) {
bool LocalChanged = false;
SmallVector<Instruction *, 10> CIS;
for (auto &BB : F) {
bool Hot = false;
SmallVector<Instruction *, 10> Candidates;
for (auto &I : BB.getInstList()) {
const FunctionSamples *FS = nullptr;
if ((isa<CallInst>(I) || isa<InvokeInst>(I)) &&
!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(I))) {
Candidates.push_back(&I);
if (callsiteIsHot(Samples, FS))
Hot = true;
}
}
if (Hot) {
CIS.insert(CIS.begin(), Candidates.begin(), Candidates.end());
}
}
for (auto I : CIS) {
InlineFunctionInfo IFI(nullptr, ACT ? &GetAssumptionCache : nullptr);
Function *CalledFunction = CallSite(I).getCalledFunction();
// Do not inline recursive calls.
if (CalledFunction == &F)
continue;
Instruction *DI = I;
if (!CalledFunction && !PromotedInsns.count(I) &&
CallSite(I).isIndirectCall())
for (const auto *FS : findIndirectCallFunctionSamples(*I)) {
auto CalleeFunctionName = FS->getName();
// If it is a recursive call, we do not inline it as it could bloat
// the code exponentially. There are better ways to handle this, e.g.
// clone the caller first, and inline the cloned caller if it is
// recursive. As LLVM does not inline recursive calls, we will simply
// ignore it instead of handling it explicitly.
if (CalleeFunctionName == F.getName())
continue;
const char *Reason = "Callee function not available";
auto R = SymbolMap.find(CalleeFunctionName);
if (R == SymbolMap.end())
continue;
CalledFunction = R->getValue();
if (CalledFunction && isLegalToPromote(I, CalledFunction, &Reason)) {
// The indirect target was promoted and inlined in the profile; as a
// result, we do not have profile info for the branch probability.
// We set the probability to 80% taken to indicate that the promoted
// direct call is likely taken.
DI = dyn_cast<Instruction>(
promoteIndirectCall(I, CalledFunction, 80, 100, false)
->stripPointerCasts());
PromotedInsns.insert(I);
} else {
DEBUG(dbgs() << "\nFailed to promote indirect call to "
<< CalleeFunctionName << " because " << Reason
<< "\n");
continue;
}
}
if (!CalledFunction || !CalledFunction->getSubprogram()) {
findCalleeFunctionSamples(*I)->findImportedFunctions(
ImportGUIDs, F.getParent(),
Samples->getTotalSamples() * SampleProfileHotThreshold / 100);
continue;
}
DebugLoc DLoc = I->getDebugLoc();
if (InlineFunction(CallSite(DI), IFI)) {
LocalChanged = true;
emitOptimizationRemark(Ctx, DEBUG_TYPE, F, DLoc,
Twine("inlined hot callee '") +
CalledFunction->getName() + "' into '" +
F.getName() + "'");
}
}
if (LocalChanged) {
Changed = true;
} else {
break;
}
}
return Changed;
}
/// \brief Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
/// is a descendant of \p BB1 in the dominator tree, then BB2 should
/// dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1 Block to check.
/// \param Descendants Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree Opposite dominator tree. If \p Descendants is filled
/// with blocks from \p BB1's dominator tree, then
/// this is the post-dominator tree, and vice versa.
template <bool IsPostDom>
void SampleProfileLoader::findEquivalencesFor(
BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
const BasicBlock *EC = EquivalenceClass[BB1];
uint64_t Weight = BlockWeights[EC];
for (const auto *BB2 : Descendants) {
bool IsDomParent = DomTree->dominates(BB2, BB1);
bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
EquivalenceClass[BB2] = EC;
// If BB2 is visited, then the entire EC should be marked as visited.
if (VisitedBlocks.count(BB2)) {
VisitedBlocks.insert(EC);
}
// If BB2 is heavier than BB1, make BB2 have the same weight
// as BB1.
//
// Note that we don't worry about the opposite situation here
// (when BB2 is lighter than BB1). We will deal with this
// during the propagation phase. Right now, we just want to
// make sure that BB1 has the largest weight of all the
// members of its equivalence set.
Weight = std::max(Weight, BlockWeights[BB2]);
}
}
if (EC == &EC->getParent()->getEntryBlock()) {
BlockWeights[EC] = Samples->getHeadSamples() + 1;
} else {
BlockWeights[EC] = Weight;
}
}
/// \brief Find equivalence classes.
///
/// Since samples may be missing from blocks, we can fill in the gaps by setting
/// the weights of all the blocks in the same equivalence class to the same
/// weight. To compute the concept of equivalence, we use dominance and loop
/// information. Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
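///
/// For example, in a simple if-then-else diamond, the entry block and the
/// join block dominate and post-dominate each other, so they fall into the
/// same equivalence class, while the then and else blocks each remain in
/// their own class.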
///
/// \param F The function to query.
void SampleProfileLoader::findEquivalenceClasses(Function &F) {
SmallVector<BasicBlock *, 8> DominatedBBs;
DEBUG(dbgs() << "\nBlock equivalence classes\n");
// Find equivalence sets based on dominance and post-dominance information.
for (auto &BB : F) {
BasicBlock *BB1 = &BB;
// Compute BB1's equivalence class once.
if (EquivalenceClass.count(BB1)) {
DEBUG(printBlockEquivalence(dbgs(), BB1));
continue;
}
// By default, blocks are in their own equivalence class.
EquivalenceClass[BB1] = BB1;
// Traverse all the blocks dominated by BB1. We are looking for
// every basic block BB2 such that:
//
// 1- BB1 dominates BB2.
// 2- BB2 post-dominates BB1.
// 3- BB1 and BB2 are in the same loop nest.
//
// If all those conditions hold, it means that BB2 is executed
// as many times as BB1, so they are placed in the same equivalence
// class by making BB2's equivalence class be BB1.
DominatedBBs.clear();
DT->getDescendants(BB1, DominatedBBs);
findEquivalencesFor(BB1, DominatedBBs, PDT.get());
DEBUG(printBlockEquivalence(dbgs(), BB1));
}
// Assign weights to equivalence classes.
//
// All the basic blocks in the same equivalence class will execute
// the same number of times. Since we know that the head block in
// each equivalence class has the largest weight, assign that weight
// to all the blocks in that equivalence class.
DEBUG(dbgs() << "\nAssign the same weight to all blocks in the same class\n");
for (auto &BI : F) {
const BasicBlock *BB = &BI;
const BasicBlock *EquivBB = EquivalenceClass[BB];
if (BB != EquivBB)
BlockWeights[BB] = BlockWeights[EquivBB];
DEBUG(printBlockWeight(dbgs(), BB));
}
}
/// \brief Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy it to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E Edge to visit.
/// \param NumUnknownEdges Current number of unknown edges.
/// \param UnknownEdge Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
Edge *UnknownEdge) {
if (!VisitedEdges.count(E)) {
(*NumUnknownEdges)++;
*UnknownEdge = E;
return 0;
}
return EdgeWeights[E];
}
/// \brief Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F Function to process.
/// \param UpdateBlockCount Whether we should update basic block counts that
/// have already been annotated.
///
/// \returns True if new weights were assigned to edges or blocks.
bool SampleProfileLoader::propagateThroughEdges(Function &F,
bool UpdateBlockCount) {
bool Changed = false;
DEBUG(dbgs() << "\nPropagation through edges\n");
for (const auto &BI : F) {
const BasicBlock *BB = &BI;
const BasicBlock *EC = EquivalenceClass[BB];
// Visit all the predecessor and successor edges to determine
// which ones have a weight assigned already. Note that it doesn't
// matter that we only keep track of a single unknown edge. The
// only case we are interested in handling is when only a single
// edge is unknown (see setEdgeOrBlockWeight).
for (unsigned i = 0; i < 2; i++) {
uint64_t TotalWeight = 0;
unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
Edge UnknownEdge, SelfReferentialEdge, SingleEdge;
if (i == 0) {
// First, visit all predecessor edges.
NumTotalEdges = Predecessors[BB].size();
for (auto *Pred : Predecessors[BB]) {
Edge E = std::make_pair(Pred, BB);
TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
if (E.first == E.second)
SelfReferentialEdge = E;
}
if (NumTotalEdges == 1) {
SingleEdge = std::make_pair(Predecessors[BB][0], BB);
}
} else {
// On the second round, visit all successor edges.
NumTotalEdges = Successors[BB].size();
for (auto *Succ : Successors[BB]) {
Edge E = std::make_pair(BB, Succ);
TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
}
if (NumTotalEdges == 1) {
SingleEdge = std::make_pair(BB, Successors[BB][0]);
}
}
// After visiting all the edges, there are three cases that we
// can handle immediately:
//
// - All the edge weights are known (i.e., NumUnknownEdges == 0).
// In this case, we simply check that the sum of all the edges
// is the same as BB's weight. If not, we change BB's weight
// to match. Additionally, if BB had not been visited before,
// we mark it visited.
//
// - Only one edge is unknown and BB has already been visited.
// In this case, we can compute the weight of the edge by
// subtracting the sum of all the known edge weights from the
// block weight. If the known edges weigh more than BB, then the
// weight of the last remaining edge is set to zero.
//
// - There exists a self-referential edge and the weight of BB is
// known. In this case, this edge can be based on BB's weight.
// We add up all the other known edges and set the weight on
// the self-referential edge as we did in the previous case.
//
// In any other case, we must continue iterating. Eventually,
// all edges will get a weight, or iteration will stop when
// it reaches SampleProfileMaxPropagateIterations.
if (NumUnknownEdges <= 1) {
uint64_t &BBWeight = BlockWeights[EC];
if (NumUnknownEdges == 0) {
if (!VisitedBlocks.count(EC)) {
// If we already know the weight of all edges, the weight of the
// basic block can be computed. It should be no larger than the sum
// of all edge weights.
if (TotalWeight > BBWeight) {
BBWeight = TotalWeight;
Changed = true;
DEBUG(dbgs() << "All edge weights for " << BB->getName()
<< " known. Set weight for block: ";
printBlockWeight(dbgs(), BB););
}
} else if (NumTotalEdges == 1 &&
EdgeWeights[SingleEdge] < BlockWeights[EC]) {
// If there is only one edge for the visited basic block, use the
// block weight to adjust edge weight if edge weight is smaller.
EdgeWeights[SingleEdge] = BlockWeights[EC];
Changed = true;
}
} else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
// If there is a single unknown edge and the block has been
// visited, then we can compute E's weight.
if (BBWeight >= TotalWeight)
EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
else
EdgeWeights[UnknownEdge] = 0;
const BasicBlock *OtherEC;
if (i == 0)
OtherEC = EquivalenceClass[UnknownEdge.first];
else
OtherEC = EquivalenceClass[UnknownEdge.second];
// An edge's weight should never exceed the weights of the blocks it connects.
if (VisitedBlocks.count(OtherEC) &&
EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
VisitedEdges.insert(UnknownEdge);
Changed = true;
DEBUG(dbgs() << "Set weight for edge: ";
printEdgeWeight(dbgs(), UnknownEdge));
}
} else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
// If a block weighs 0, all its in/out edges should weigh 0.
if (i == 0) {
for (auto *Pred : Predecessors[BB]) {
Edge E = std::make_pair(Pred, BB);
EdgeWeights[E] = 0;
VisitedEdges.insert(E);
}
} else {
for (auto *Succ : Successors[BB]) {
Edge E = std::make_pair(BB, Succ);
EdgeWeights[E] = 0;
VisitedEdges.insert(E);
}
}
} else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
uint64_t &BBWeight = BlockWeights[BB];
// We have a self-referential edge and the weight of BB is known.
if (BBWeight >= TotalWeight)
EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
else
EdgeWeights[SelfReferentialEdge] = 0;
VisitedEdges.insert(SelfReferentialEdge);
Changed = true;
DEBUG(dbgs() << "Set self-referential edge weight to: ";
printEdgeWeight(dbgs(), SelfReferentialEdge));
}
if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
BlockWeights[EC] = TotalWeight;
VisitedBlocks.insert(EC);
Changed = true;
}
}
}
return Changed;
}
/// \brief Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
void SampleProfileLoader::buildEdges(Function &F) {
for (auto &BI : F) {
BasicBlock *B1 = &BI;
// Add predecessors for B1.
SmallPtrSet<BasicBlock *, 16> Visited;
if (!Predecessors[B1].empty())
llvm_unreachable("Found a stale predecessors list in a basic block.");
for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
BasicBlock *B2 = *PI;
if (Visited.insert(B2).second)
Predecessors[B1].push_back(B2);
}
// Add successors for B1.
Visited.clear();
if (!Successors[B1].empty())
llvm_unreachable("Found a stale successors list in a basic block.");
for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
BasicBlock *B2 = *SI;
if (Visited.insert(B2).second)
Successors[B1].push_back(B2);
}
}
}
/// Sorts the CallTargetMap \p M by count in descending order and stores the
/// sorted result in \p Sorted. Returns the sum of all counts.
static uint64_t SortCallTargets(SmallVector<InstrProfValueData, 2> &Sorted,
const SampleRecord::CallTargetMap &M) {
Sorted.clear();
uint64_t Sum = 0;
for (auto I = M.begin(); I != M.end(); ++I) {
Sum += I->getValue();
Sorted.push_back({Function::getGUID(I->getKey()), I->getValue()});
}
std::sort(Sorted.begin(), Sorted.end(),
[](const InstrProfValueData &L, const InstrProfValueData &R) {
if (L.Count == R.Count)
return L.Value > R.Value;
else
return L.Count > R.Count;
});
return Sum;
}
/// \brief Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
/// of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
/// weight of the block is already known, the weight of the unknown
/// edge will be the weight of the block minus the sum of all the known
/// edges. If the sum of all the known edges is larger than BB's weight,
/// we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
/// known, the weight for that edge is set to the weight of the block
/// minus the weight of the other incoming edges to that block (if
/// known).
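///
/// For example, if a block with weight 100 has two outgoing edges and one of
/// them already carries a weight of 30, the remaining edge is assigned a
/// weight of 70.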
void SampleProfileLoader::propagateWeights(Function &F) {
bool Changed = true;
unsigned I = 0;
// If BB weight is larger than its corresponding loop's header BB weight,
// use the BB weight to replace the loop header BB weight.
for (auto &BI : F) {
BasicBlock *BB = &BI;
Loop *L = LI->getLoopFor(BB);
if (!L) {
continue;
}
BasicBlock *Header = L->getHeader();
if (Header && BlockWeights[BB] > BlockWeights[Header]) {
BlockWeights[Header] = BlockWeights[BB];
}
}
// Before propagation starts, build, for each block, a list of
// unique predecessors and successors. This is necessary to handle
// identical edges in multiway branches. Since we visit all blocks and all
// edges of the CFG, it is cleaner to build these lists once at the start
// of the pass.
buildEdges(F);
// Propagate until we converge or we go past the iteration limit.
while (Changed && I++ < SampleProfileMaxPropagateIterations) {
Changed = propagateThroughEdges(F, false);
}
// The first propagation propagates BB counts from annotated BBs to unknown
// BBs. The 2nd propagation pass resets edge weights and uses all BB weights
// to propagate edge weights.
VisitedEdges.clear();
Changed = true;
while (Changed && I++ < SampleProfileMaxPropagateIterations) {
Changed = propagateThroughEdges(F, false);
}
// The 3rd propagation pass allows adjusting annotated BB weights that are
// obviously wrong.
Changed = true;
while (Changed && I++ < SampleProfileMaxPropagateIterations) {
Changed = propagateThroughEdges(F, true);
}
// Generate MD_prof metadata for every branch instruction using the
// edge weights computed during propagation.
DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
LLVMContext &Ctx = F.getContext();
MDBuilder MDB(Ctx);
for (auto &BI : F) {
BasicBlock *BB = &BI;
if (BlockWeights[BB]) {
for (auto &I : BB->getInstList()) {
if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
continue;
CallSite CS(&I);
if (!CS.getCalledFunction()) {
const DebugLoc &DLoc = I.getDebugLoc();
if (!DLoc)
continue;
const DILocation *DIL = DLoc;
uint32_t LineOffset = getOffset(DIL);
uint32_t Discriminator = DIL->getBaseDiscriminator();
const FunctionSamples *FS = findFunctionSamples(I);
if (!FS)
continue;
auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
if (!T || T.get().size() == 0)
continue;
SmallVector<InstrProfValueData, 2> SortedCallTargets;
uint64_t Sum = SortCallTargets(SortedCallTargets, T.get());
annotateValueSite(*I.getParent()->getParent()->getParent(), I,
SortedCallTargets, Sum, IPVK_IndirectCallTarget,
SortedCallTargets.size());
} else if (!dyn_cast<IntrinsicInst>(&I)) {
SmallVector<uint32_t, 1> Weights;
Weights.push_back(BlockWeights[BB]);
I.setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
}
}
}
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 1)
continue;
if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
continue;
DebugLoc BranchLoc = TI->getDebugLoc();
DEBUG(dbgs() << "\nGetting weights for branch at line "
<< ((BranchLoc) ? Twine(BranchLoc.getLine())
: Twine("<UNKNOWN LOCATION>"))
<< ".\n");
SmallVector<uint32_t, 4> Weights;
uint32_t MaxWeight = 0;
DebugLoc MaxDestLoc;
for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
BasicBlock *Succ = TI->getSuccessor(I);
Edge E = std::make_pair(BB, Succ);
uint64_t Weight = EdgeWeights[E];
DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
// Use uint32_t saturated arithmetic to adjust the incoming weights,
// if needed. Sample counts in profiles are 64-bit unsigned values,
// but internally branch weights are expressed as 32-bit values.
if (Weight > std::numeric_limits<uint32_t>::max()) {
DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
Weight = std::numeric_limits<uint32_t>::max();
}
// One is added to each weight to avoid propagation errors introduced by
// 0 weights.
Weights.push_back(static_cast<uint32_t>(Weight + 1));
if (Weight != 0) {
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxDestLoc = Succ->getFirstNonPHIOrDbgOrLifetime()->getDebugLoc();
}
}
}
uint64_t TempWeight;
// Only set weights if there is at least one non-zero weight.
// In any other case, let the analyzer set weights.
// Do not set weights if the weights are present. In ThinLTO, the profile
// annotation is done twice. If the first annotation already set the
// weights, the second pass does not need to set them.
if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
TI->setMetadata(llvm::LLVMContext::MD_prof,
MDB.createBranchWeights(Weights));
emitOptimizationRemark(
Ctx, DEBUG_TYPE, F, MaxDestLoc,
Twine("most popular destination for conditional branches at ") +
((BranchLoc) ? Twine(BranchLoc->getFilename() + ":" +
Twine(BranchLoc.getLine()) + ":" +
Twine(BranchLoc.getCol()))
: Twine("<UNKNOWN LOCATION>")));
} else {
DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
}
}
}
/// \brief Get the line number for the function header.
///
/// This looks up function \p F in the current compilation unit and
/// retrieves the line number where the function is defined. This line
/// corresponds to offset 0 for all the samples read from the profile file;
/// every sample's line offset is relative to it.
///
/// \param F Function object to query.
///
/// \returns the line number where \p F is defined. If it returns 0,
/// it means that there is no debug information available for \p F.
unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
if (DISubprogram *S = F.getSubprogram())
return S->getLine();
// If the start of \p F is missing, emit a diagnostic to inform the user
// about the missed opportunity.
F.getContext().diagnose(DiagnosticInfoSampleProfile(
"No debug information found in function " + F.getName() +
": Function profile not used",
DS_Warning));
return 0;
}
void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
DT.reset(new DominatorTree);
DT->recalculate(F);
PDT.reset(new PostDomTreeBase<BasicBlock>());
PDT->recalculate(F);
LI.reset(new LoopInfo);
LI->analyze(*DT);
}
/// \brief Generate branch weight metadata for all branches in \p F.
///
/// Branch weights are computed out of instruction samples using a
/// propagation heuristic. Propagation proceeds in 3 phases:
///
/// 1- Assignment of block weights. All the basic blocks in the function
/// are initially assigned the same weight as their most frequently
/// executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
/// blocks, we can fill in the gaps by setting the weights of all the
/// blocks in the same equivalence class to the same weight. To compute
/// the concept of equivalence, we use dominance and loop information.
/// Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
/// propagation heuristic. The following rules are applied to every
/// block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
/// of that edge is the weight of the block.
///
/// - If all the edges are known except one, and the weight of the
/// block is already known, the weight of the unknown edge will
/// be the weight of the block minus the sum of all the known
/// edges. If the sum of all the known edges is larger than BB's weight,
/// we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
/// known, the weight for that edge is set to the weight of the block
/// minus the weight of the other incoming edges to that block (if
/// known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
bool Changed = false;
if (getFunctionLoc(F) == 0)
return false;
DEBUG(dbgs() << "Line number for the first instruction in " << F.getName()
<< ": " << getFunctionLoc(F) << "\n");
DenseSet<GlobalValue::GUID> ImportGUIDs;
Changed |= inlineHotFunctions(F, ImportGUIDs);
// Compute basic block weights.
Changed |= computeBlockWeights(F);
if (Changed) {
// Add an entry count to the function using the samples gathered at the
// function entry. Also set the GUIDs that come from a different module
// but were inlined in the profiled binary. This aims at making the IR
// match the profiled binary before annotation.
F.setEntryCount(Samples->getHeadSamples() + 1, &ImportGUIDs);
// Compute dominance and loop info needed for propagation.
computeDominanceAndLoopInfo(F);
// Find equivalence classes.
findEquivalenceClasses(F);
// Propagate weights to all edges.
propagateWeights(F);
}
// If coverage checking was requested, compute it now.
if (SampleProfileRecordCoverage) {
unsigned Used = CoverageTracker.countUsedRecords(Samples);
unsigned Total = CoverageTracker.countBodyRecords(Samples);
unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
if (Coverage < SampleProfileRecordCoverage) {
F.getContext().diagnose(DiagnosticInfoSampleProfile(
F.getSubprogram()->getFilename(), getFunctionLoc(F),
Twine(Used) + " of " + Twine(Total) + " available profile records (" +
Twine(Coverage) + "%) were applied",
DS_Warning));
}
}
if (SampleProfileSampleCoverage) {
uint64_t Used = CoverageTracker.getTotalUsedSamples();
uint64_t Total = CoverageTracker.countBodySamples(Samples);
unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
if (Coverage < SampleProfileSampleCoverage) {
F.getContext().diagnose(DiagnosticInfoSampleProfile(
F.getSubprogram()->getFilename(), getFunctionLoc(F),
Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
Twine(Coverage) + "%) were applied",
DS_Warning));
}
}
return Changed;
}
char SampleProfileLoaderLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
"Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
"Sample Profile loader", false, false)
bool SampleProfileLoader::doInitialization(Module &M) {
auto &Ctx = M.getContext();
auto ReaderOrErr = SampleProfileReader::create(Filename, Ctx);
if (std::error_code EC = ReaderOrErr.getError()) {
std::string Msg = "Could not open profile: " + EC.message();
Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
return false;
}
Reader = std::move(ReaderOrErr.get());
ProfileIsValid = (Reader->read() == sampleprof_error::success);
return true;
}
ModulePass *llvm::createSampleProfileLoaderPass() {
return new SampleProfileLoaderLegacyPass(SampleProfileFile);
}
ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
return new SampleProfileLoaderLegacyPass(Name);
}
bool SampleProfileLoader::runOnModule(Module &M) {
if (!ProfileIsValid)
return false;
// Compute the total number of samples collected in this profile.
for (const auto &I : Reader->getProfiles())
TotalCollectedSamples += I.second.getTotalSamples();
// Populate the symbol map.
for (const auto &N_F : M.getValueSymbolTable()) {
std::string OrigName = N_F.getKey();
Function *F = dyn_cast<Function>(N_F.getValue());
if (F == nullptr)
continue;
SymbolMap[OrigName] = F;
auto pos = OrigName.find('.');
if (pos != std::string::npos) {
std::string NewName = OrigName.substr(0, pos);
auto r = SymbolMap.insert(std::make_pair(NewName, F));
// Failing to insert means there is already an entry in SymbolMap, i.e.,
// multiple functions are mapped to the same stripped name. In this
// name-conflict case, set the value to nullptr to avoid confusion.
if (!r.second)
r.first->second = nullptr;
}
}
bool retval = false;
for (auto &F : M)
if (!F.isDeclaration()) {
clearFunctionData();
retval |= runOnFunction(F);
}
if (M.getProfileSummary() == nullptr)
M.setProfileSummary(Reader->getSummary().getMD(M.getContext()));
return retval;
}
bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
// FIXME: pass in AssumptionCache correctly for the new pass manager.
SampleLoader.setACT(&getAnalysis<AssumptionCacheTracker>());
return SampleLoader.runOnModule(M);
}
bool SampleProfileLoader::runOnFunction(Function &F) {
F.setEntryCount(0);
Samples = Reader->getSamplesFor(F);
if (Samples && !Samples->empty())
return emitAnnotations(F);
return false;
}
PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
ModuleAnalysisManager &AM) {
SampleProfileLoader SampleLoader(
ProfileFileName.empty() ? SampleProfileFile : ProfileFileName);
SampleLoader.doInitialization(M);
if (!SampleLoader.runOnModule(M))
return PreservedAnalyses::all();
return PreservedAnalyses::none();
}