//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "machine-scheduler"
STATISTIC(NumClustered, "Number of load/store pairs clustered");
namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
cl::desc("Print critical path length to stdout"));
cl::opt<bool> VerifyScheduling(
"verify-misched", cl::Hidden,
cl::desc("Verify machine instrs before and after machine scheduling"));
} // end namespace llvm
#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
cl::desc("Pop up a window to show MISched dags after they are processed"));
/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
cl::desc("Hide nodes with more predecessors/successors than cutoff"));
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
cl::desc("Limit ready list to N instructions"), cl::init(256));
static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
cl::desc("Enable register pressure scheduling."), cl::init(true));
static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
cl::desc("Enable cyclic critical path analysis."), cl::init(true));
static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
cl::desc("Enable memop clustering."),
cl::init(true));
static cl::opt<bool>
ForceFastCluster("force-fast-cluster", cl::Hidden,
cl::desc("Switch to fast cluster algorithm with the lost "
"of some fusion opportunities"),
cl::init(false));
static cl::opt<unsigned>
FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
cl::desc("The threshold for fast cluster"),
cl::init(1000));
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
MachineSchedContext::MachineSchedContext() {
RegClassInfo = new RegisterClassInfo();
}
MachineSchedContext::~MachineSchedContext() {
delete RegClassInfo;
}
namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
public MachineFunctionPass {
public:
MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
void print(raw_ostream &O, const Module* = nullptr) const override;
protected:
void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
MachineScheduler();
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction&) override;
static char ID; // Class identification, replacement for typeinfo
protected:
ScheduleDAGInstrs *createMachineScheduler();
};
/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
PostMachineScheduler();
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction&) override;
static char ID; // Class identification, replacement for typeinfo
protected:
ScheduleDAGInstrs *createPostMachineScheduler();
};
} // end anonymous namespace
char MachineScheduler::ID = 0;
char &llvm::MachineSchedulerID = MachineScheduler::ID;
INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
"Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
"Machine Instruction Scheduler", false, false)
MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<TargetPassConfig>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;
char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
"PostRA Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
"PostRA Machine Instruction Scheduler", false, false)
PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
MachineSchedRegistry::Registry;
/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
cl::init(&useDefaultMachineSched), cl::Hidden,
cl::desc("Machine instruction scheduler to use"));
static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
useDefaultMachineSched);
static cl::opt<bool> EnableMachineSched(
"enable-misched",
cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
cl::Hidden);
static cl::opt<bool> EnablePostRAMachineSched(
"enable-post-misched",
cl::desc("Enable the post-ra machine instruction scheduling pass."),
cl::init(true), cl::Hidden);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator Beg) {
assert(I != Beg && "reached the top of the region, cannot decrement");
while (--I != Beg) {
if (!I->isDebugOrPseudoInstr())
break;
}
return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
MachineBasicBlock::const_iterator Beg) {
return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
.getNonConstIterator();
}
/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator End) {
for(; I != End; ++I) {
if (!I->isDebugOrPseudoInstr())
break;
}
return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
MachineBasicBlock::const_iterator End) {
return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
.getNonConstIterator();
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
// Select the scheduler, or set the default.
MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
if (Ctor != useDefaultMachineSched)
return Ctor(this);
// Get the default scheduler set by the target for this function.
ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
if (Scheduler)
return Scheduler;
// Default to GenericScheduler.
return createGenericSchedLive(this);
}
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
// Get the postRA scheduler set by the target for this function.
ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
if (Scheduler)
return Scheduler;
// Default to GenericScheduler.
return createGenericSchedPostRA(this);
}
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (skipFunction(mf.getFunction()))
return false;
if (EnableMachineSched.getNumOccurrences()) {
if (!EnableMachineSched)
return false;
} else if (!mf.getSubtarget().enableMachineScheduler())
return false;
LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
MLI = &getAnalysis<MachineLoopInfo>();
MDT = &getAnalysis<MachineDominatorTree>();
PassConfig = &getAnalysis<TargetPassConfig>();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
LIS = &getAnalysis<LiveIntervals>();
if (VerifyScheduling) {
LLVM_DEBUG(LIS->dump());
MF->verify(this, "Before machine scheduling.");
}
RegClassInfo->runOnMachineFunction(*MF);
// Instantiate the selected scheduler for this target, function, and
// optimization level.
std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
scheduleRegions(*Scheduler, false);
LLVM_DEBUG(LIS->dump());
if (VerifyScheduling)
MF->verify(this, "After machine scheduling.");
return true;
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (skipFunction(mf.getFunction()))
return false;
if (EnablePostRAMachineSched.getNumOccurrences()) {
if (!EnablePostRAMachineSched)
return false;
} else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
return false;
}
LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
MLI = &getAnalysis<MachineLoopInfo>();
PassConfig = &getAnalysis<TargetPassConfig>();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
if (VerifyScheduling)
MF->verify(this, "Before post machine scheduling.");
// Instantiate the selected scheduler for this target, function, and
// optimization level.
std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
scheduleRegions(*Scheduler, true);
if (VerifyScheduling)
MF->verify(this, "After post machine scheduling.");
return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB,
MachineFunction *MF,
const TargetInstrInfo *TII) {
return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
/// RegionBegin is the first instruction in the scheduling region, and
/// RegionEnd is either MBB->end() or the scheduling boundary after the
/// last instruction in the scheduling region. These iterators cannot refer
/// to instructions outside of the identified scheduling region because
/// those may be reordered before scheduling this region.
MachineBasicBlock::iterator RegionBegin;
MachineBasicBlock::iterator RegionEnd;
unsigned NumRegionInstrs;
SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
unsigned N) :
RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace
using MBBRegionsVector = SmallVector<SchedRegion, 16>;
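/// Compute the scheduling regions of MBB by walking it bottom-up and breaking
/// it at calls and target scheduling boundaries. Regions containing only
/// debug/pseudo instructions are dropped, and the resulting list is reversed
/// when the scheduler wants to visit regions top-down.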
static void
getSchedRegions(MachineBasicBlock *MBB,
MBBRegionsVector &Regions,
bool RegionsTopDown) {
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
MachineBasicBlock::iterator I = nullptr;
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin(); RegionEnd = I) {
// Avoid decrementing RegionEnd for blocks with no terminator.
if (RegionEnd != MBB->end() ||
isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
--RegionEnd;
}
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
unsigned NumRegionInstrs = 0;
I = RegionEnd;
for (;I != MBB->begin(); --I) {
MachineInstr &MI = *std::prev(I);
if (isSchedBoundary(&MI, &*MBB, MF, TII))
break;
if (!MI.isDebugOrPseudoInstr()) {
// MBB::size() uses instr_iterator to count. Here we need a bundle to
// count as a single instruction.
++NumRegionInstrs;
}
}
// It's possible we found a scheduling region that only has debug
// instructions. Don't bother scheduling these.
if (NumRegionInstrs != 0)
Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
}
if (RegionsTopDown)
std::reverse(Regions.begin(), Regions.end());
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
bool FixKillFlags) {
// Visit all machine basic blocks.
//
// TODO: Visit blocks in global postorder or postorder within the bottom-up
// loop tree. Then we can optionally compute global RegPressure.
for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
MBB != MBBEnd; ++MBB) {
Scheduler.startBlock(&*MBB);
#ifndef NDEBUG
if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
continue;
if (SchedOnlyBlock.getNumOccurrences()
&& (int)SchedOnlyBlock != MBB->getNumber())
continue;
#endif
// Break the block into scheduling regions [I, RegionEnd). RegionEnd
// points to the scheduling boundary at the bottom of the region. The DAG
// does not include RegionEnd, but the region does (i.e. the next
// RegionEnd is above the previous RegionBegin). If the current block has
// no terminator then RegionEnd == MBB->end() for the bottom region.
//
// All the regions of MBB are first found and stored in MBBRegions. They are
// then processed top-down within the MBB if getSchedRegions was called with
// RegionsTopDown == true.
//
// The Scheduler may insert instructions during either schedule() or
// exitRegion(), even for empty regions. So the local iterators 'I' and
// 'RegionEnd' are invalid across these calls. Instructions must not be
// added to other regions than the current one without updating MBBRegions.
MBBRegionsVector MBBRegions;
getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
for (MBBRegionsVector::iterator R = MBBRegions.begin();
R != MBBRegions.end(); ++R) {
MachineBasicBlock::iterator I = R->RegionBegin;
MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
unsigned NumRegionInstrs = R->NumRegionInstrs;
// Notify the scheduler of the region, even if we may skip scheduling
// it. Perhaps it still needs to be bundled.
Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);
// Skip empty scheduling regions (0 or 1 schedulable instructions).
if (I == RegionEnd || I == std::prev(RegionEnd)) {
// Close the current region. Bundle the terminator if needed.
// This invalidates 'RegionEnd' and 'I'.
Scheduler.exitRegion();
continue;
}
LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
<< " " << MBB->getName() << "\n From: " << *I
<< " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End\n";
dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
if (DumpCriticalPathLength) {
errs() << MF->getName();
errs() << ":%bb. " << MBB->getNumber();
errs() << " " << MBB->getName() << " \n";
}
// Schedule a region: possibly reorder instructions.
// This invalidates the original region iterators.
Scheduler.schedule();
// Close the current region.
Scheduler.exitRegion();
}
Scheduler.finishBlock();
// FIXME: Ideally, no further passes should rely on kill flags. However,
// thumb2 size reduction is currently an exception, so the PostMIScheduler
// needs to do this.
if (FixKillFlags)
Scheduler.fixupKills(*MBB);
}
Scheduler.finalizeSchedule();
}
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
// unimplemented
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
dbgs() << "Queue " << Name << ": ";
for (const SUnit *SU : Queue)
dbgs() << SU->NodeNum << " ";
dbgs() << "\n";
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//
// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
if (SuccEdge->isWeak()) {
--SuccSU->WeakPredsLeft;
if (SuccEdge->isCluster())
NextClusterSucc = SuccSU;
return;
}
#ifndef NDEBUG
if (SuccSU->NumPredsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
dumpNode(*SuccSU);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
// SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
// CurrCycle may have advanced since then.
if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
--SuccSU->NumPredsLeft;
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
SchedImpl->releaseTopNode(SuccSU);
}
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
for (SDep &Succ : SU->Succs)
releaseSucc(SU, &Succ);
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
if (PredEdge->isWeak()) {
--PredSU->WeakSuccsLeft;
if (PredEdge->isCluster())
NextClusterPred = PredSU;
return;
}
#ifndef NDEBUG
if (PredSU->NumSuccsLeft == 0) {
dbgs() << "*** Scheduling failed! ***\n";
dumpNode(*PredSU);
dbgs() << " has been released too many times!\n";
llvm_unreachable(nullptr);
}
#endif
// SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
// CurrCycle may have advanced since then.
if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
--PredSU->NumSuccsLeft;
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
for (SDep &Pred : SU->Preds)
releasePred(SU, &Pred);
}
void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
ScheduleDAGInstrs::startBlock(bb);
SchedImpl->enterMBB(bb);
}
void ScheduleDAGMI::finishBlock() {
SchedImpl->leaveMBB();
ScheduleDAGInstrs::finishBlock();
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs)
{
ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
// Advance RegionBegin if the first instruction moves down.
if (&*RegionBegin == MI)
++RegionBegin;
// Update the instruction stream.
BB->splice(InsertPos, BB, MI);
// Update LiveIntervals
if (LIS)
LIS->handleMove(*MI, /*UpdateFlags=*/true);
// Recede RegionBegin if an instruction moves above the first.
if (RegionBegin == InsertPos)
RegionBegin = MI;
}
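/// In builds with assertions enabled, enforce the -misched-cutoff limit: once
/// the limit is hit, collapse the unscheduled zone and return false so
/// scheduling stops; otherwise count this instruction and return true.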
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
CurrentTop = CurrentBottom;
return false;
}
++NumInstrsScheduled;
#endif
return true;
}
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
LLVM_DEBUG(SchedImpl->dumpPolicy());
// Build the DAG.
buildSchedGraph(AA);
postprocessDAG();
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
LLVM_DEBUG(dump());
if (PrintDAGs) dump();
if (ViewMISchedDAGs) viewGraph();
// Initialize the strategy before modifying the DAG.
// This may initialize a DFSResult to be used for queue priority.
SchedImpl->initialize(this);
// Initialize ready queues now that the DAG and priority data are finalized.
initQueues(TopRoots, BotRoots);
bool IsTopNode = false;
while (true) {
LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
SUnit *SU = SchedImpl->pickNode(IsTopNode);
if (!SU) break;
assert(!SU->isScheduled && "Node already scheduled");
if (!checkSchedLimit())
break;
MachineInstr *MI = SU->getInstr();
if (IsTopNode) {
assert(SU->isTopReady() && "node still has unscheduled dependencies");
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
else
moveInstruction(MI, CurrentTop);
} else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
MachineBasicBlock::iterator priorII =
priorNonDebug(CurrentBottom, CurrentTop);
if (&*priorII == MI)
CurrentBottom = priorII;
else {
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, priorII);
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
}
}
// Notify the scheduling strategy before updating the DAG.
// This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
// runs, it can then use the accurate ReadyCycle time to determine whether
// newly released nodes can move to the readyQ.
SchedImpl->schedNode(SU, IsTopNode);
updateQueues(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
dbgs() << '\n';
});
}
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
for (auto &m : Mutations)
m->apply(this);
}
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
SmallVectorImpl<SUnit*> &BotRoots) {
for (SUnit &SU : SUnits) {
assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");
// Order predecessors so DFSResult follows the critical path.
SU.biasCriticalPath();
// A SUnit is ready to top schedule if it has no predecessors.
if (!SU.NumPredsLeft)
TopRoots.push_back(&SU);
// A SUnit is ready to bottom schedule if it has no successors.
if (!SU.NumSuccsLeft)
BotRoots.push_back(&SU);
}
ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
ArrayRef<SUnit*> BotRoots) {
NextClusterSucc = nullptr;
NextClusterPred = nullptr;
// Release all DAG roots for scheduling, not including EntrySU/ExitSU.
//
// Nodes with unreleased weak edges can still be roots.
// Release top roots in forward order.
for (SUnit *SU : TopRoots)
SchedImpl->releaseTopNode(SU);
// Release bottom roots in reverse order so the higher priority nodes appear
// first. This is more natural and slightly more efficient.
for (SmallVectorImpl<SUnit*>::const_reverse_iterator
I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
SchedImpl->releaseBottomNode(*I);
}
releaseSuccessors(&EntrySU);
releasePredecessors(&ExitSU);
SchedImpl->registerRoots();
// Advance past initial DebugValues.
CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
// Release dependent instructions for scheduling.
if (IsTopNode)
releaseSuccessors(SU);
else
releasePredecessors(SU);
SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
// If first instruction was a DBG_VALUE then put it back.
if (FirstDbgValue) {
BB->splice(RegionBegin, BB, FirstDbgValue);
RegionBegin = FirstDbgValue;
}
for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
MachineInstr *DbgValue = P.first;
MachineBasicBlock::iterator OrigPrevMI = P.second;
if (&*RegionBegin == DbgValue)
++RegionBegin;
BB->splice(++OrigPrevMI, BB, DbgValue);
if (OrigPrevMI == std::prev(RegionEnd))
RegionEnd = DbgValue;
}
DbgValues.clear();
FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
for (MachineInstr &MI : *this) {
if (SUnit *SU = getSUnit(&MI))
dumpNode(*SU);
else
dbgs() << "Missing SUnit\n";
}
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//
ScheduleDAGMILive::~ScheduleDAGMILive() {
delete DFSResult;
}
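/// Record each virtual register read by SU's instruction in VRegUses. Re-defs
/// are ignored when lane masks are tracked, and duplicate entries for the same
/// SU are avoided.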
void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
const MachineInstr &MI = *SU.getInstr();
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (!MO.readsReg())
continue;
if (TrackLaneMasks && !MO.isUse())
continue;
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
// Ignore re-defs.
if (TrackLaneMasks) {
bool FoundDef = false;
for (const MachineOperand &MO2 : MI.operands()) {
if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
FoundDef = true;
break;
}
}
if (FoundDef)
continue;
}
// Record this local VReg use.
VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
for (; UI != VRegUses.end(); ++UI) {
if (UI->SU == &SU)
break;
}
if (UI == VRegUses.end())
VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
}
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs)
{
// ScheduleDAGMI initializes SchedImpl's per-region policy.
ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
// For convenience remember the end of the liveness region.
LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
SUPressureDiffs.clear();
ShouldTrackPressure = SchedImpl->shouldTrackPressure();
ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();
assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
"ShouldTrackLaneMasks requires ShouldTrackPressure");
}
// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
VRegUses.clear();
VRegUses.setUniverse(MRI.getNumVirtRegs());
for (SUnit &SU : SUnits)
collectVRegUses(SU);
TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
ShouldTrackLaneMasks, false);
BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
ShouldTrackLaneMasks, false);
// Close the RPTracker to finalize live ins.
RPTracker.closeRegion();
LLVM_DEBUG(RPTracker.dump());
// Initialize the live ins and live outs.
TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
// Close one end of the tracker so we can call
// getMaxUpward/DownwardPressureDelta before advancing across any
// instructions. This converts currently live regs into live ins/outs.
TopRPTracker.closeTop();
BotRPTracker.closeBottom();
BotRPTracker.initLiveThru(RPTracker);
if (!BotRPTracker.getLiveThru().empty()) {
TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
LLVM_DEBUG(dbgs() << "Live Thru: ";
dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
}
// For each live out vreg reduce the pressure change associated with other
// uses of the same vreg below the live-out reaching def.
updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
// Account for liveness generated by the region boundary.
if (LiveRegionEnd != RegionEnd) {
SmallVector<RegisterMaskPair, 8> LiveUses;
BotRPTracker.recede(&LiveUses);
updatePressureDiffs(LiveUses);
}
LLVM_DEBUG(dbgs() << "Top Pressure:\n";
dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
dbgs() << "Bottom Pressure:\n";
dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););
assert((BotRPTracker.getPos() == RegionEnd ||
(RegionEnd->isDebugInstr() &&
BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
"Can't find the region bottom");
// Cache the list of excess pressure sets in this region. This will also track
// the max pressure in the scheduled code for these sets.
RegionCriticalPSets.clear();
const std::vector<unsigned> &RegionPressure =
RPTracker.getPressure().MaxSetPressure;
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
if (RegionPressure[i] > Limit) {
LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
<< " Actual " << RegionPressure[i] << "\n");
RegionCriticalPSets.push_back(PressureChange(i));
}
}
LLVM_DEBUG(dbgs() << "Excess PSets: ";
for (const PressureChange &RCPS
: RegionCriticalPSets) dbgs()
<< TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
dbgs() << "\n");
}
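/// After scheduling SU, raise the cached max pressure of the region's critical
/// pressure sets touched by SU, and emit a debug message for sets that are
/// near or above their limit.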
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
const std::vector<unsigned> &NewMaxPressure) {
const PressureDiff &PDiff = getPressureDiff(SU);
unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
for (const PressureChange &PC : PDiff) {
if (!PC.isValid())
break;
unsigned ID = PC.getPSet();
while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
++CritIdx;
if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
&& NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
}
unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
if (NewMaxPressure[ID] >= Limit - 2) {
LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
<< NewMaxPressure[ID]
<< ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
<< Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
<< " livethru)\n");
}
}
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
ArrayRef<RegisterMaskPair> LiveUses) {
for (const RegisterMaskPair &P : LiveUses) {
Register Reg = P.RegUnit;
/// FIXME: Currently assuming single-use physregs.
if (!Register::isVirtualRegister(Reg))
continue;
if (ShouldTrackLaneMasks) {
// If the register has just become live then other uses won't change
// this fact anymore => decrement pressure.
// If the register has just become dead then other uses make it come
// back to life => increment pressure.
bool Decrement = P.LaneMask.any();
for (const VReg2SUnit &V2SU
: make_range(VRegUses.find(Reg), VRegUses.end())) {
SUnit &SU = *V2SU.SU;
if (SU.isScheduled || &SU == &ExitSU)
continue;
PressureDiff &PDiff = getPressureDiff(&SU);
PDiff.addPressureChange(Reg, Decrement, &MRI);
LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
<< printReg(Reg, TRI) << ':'
<< PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
dbgs() << " to "; PDiff.dump(*TRI););
}
} else {
assert(P.LaneMask.any());
LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
// This may be called before CurrentBottom has been initialized. However,
// BotRPTracker must have a valid position. We want the value live into the
// instruction or live out of the block, so ask for the previous
// instruction's live-out.
const LiveInterval &LI = LIS->getInterval(Reg);
VNInfo *VNI;
MachineBasicBlock::const_iterator I =
nextIfDebug(BotRPTracker.getPos(), BB->end());
if (I == BB->end())
VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
else {
LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
VNI = LRQ.valueIn();
}
// RegisterPressureTracker guarantees that readsReg is true for LiveUses.
assert(VNI && "No live value at use.");
for (const VReg2SUnit &V2SU
: make_range(VRegUses.find(Reg), VRegUses.end())) {
SUnit *SU = V2SU.SU;
// If this use comes before the reaching def, it cannot be a last use,
// so decrease its pressure change.
if (!SU->isScheduled && SU != &ExitSU) {
LiveQueryResult LRQ =
LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
if (LRQ.valueIn() == VNI) {
PressureDiff &PDiff = getPressureDiff(SU);
PDiff.addPressureChange(Reg, true, &MRI);
LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
<< *SU->getInstr();
dbgs() << " to "; PDiff.dump(*TRI););
}
}
}
}
}
}
void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
if (EntrySU.getInstr() != nullptr)
dumpNodeAll(EntrySU);
for (const SUnit &SU : SUnits) {
dumpNodeAll(SU);
if (ShouldTrackPressure) {
dbgs() << " Pressure Diff : ";
getPressureDiff(&SU).dump(*TRI);
}
dbgs() << " Single Issue : ";
if (SchedModel.mustBeginGroup(SU.getInstr()) &&
SchedModel.mustEndGroup(SU.getInstr()))
dbgs() << "true;";
else
dbgs() << "false;";
dbgs() << '\n';
}
if (ExitSU.getInstr() != nullptr)
dumpNodeAll(ExitSU);
#endif
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
LLVM_DEBUG(SchedImpl->dumpPolicy());
buildDAGWithRegPressure();
postprocessDAG();
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
// Initialize the strategy before modifying the DAG.
// This may initialize a DFSResult to be used for queue priority.
SchedImpl->initialize(this);
LLVM_DEBUG(dump());
if (PrintDAGs) dump();
if (ViewMISchedDAGs) viewGraph();
// Initialize ready queues now that the DAG and priority data are finalized.
initQueues(TopRoots, BotRoots);
bool IsTopNode = false;
while (true) {
LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
SUnit *SU = SchedImpl->pickNode(IsTopNode);
if (!SU) break;
assert(!SU->isScheduled && "Node already scheduled");
if (!checkSchedLimit())
break;
scheduleMI(SU, IsTopNode);
if (DFSResult) {
unsigned SubtreeID = DFSResult->getSubtreeID(SU);
if (!ScheduledTrees.test(SubtreeID)) {
ScheduledTrees.set(SubtreeID);
DFSResult->scheduleTree(SubtreeID);
SchedImpl->scheduleTree(SubtreeID);
}
}
// Notify the scheduling strategy after updating the DAG.
SchedImpl->schedNode(SU, IsTopNode);
updateQueues(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
dbgs() << '\n';
});
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
if (!ShouldTrackPressure) {
RPTracker.reset();
RegionCriticalPSets.clear();
buildSchedGraph(AA);
return;
}
// Initialize the register pressure tracker used by buildSchedGraph.
RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
// Account for liveness generated by the region boundary.
if (LiveRegionEnd != RegionEnd)
RPTracker.recede();
// Build the DAG, and compute current register pressure.
buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
// Initialize top/bottom trackers after computing region pressure.
initRegPressure();
}
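/// Build (or rebuild) the SchedDFSResult describing the DAG's subtree
/// structure and reset the set of already-scheduled subtrees.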
void ScheduleDAGMILive::computeDFSResult() {
if (!DFSResult)
DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
DFSResult->clear();
ScheduledTrees.clear();
DFSResult->resize(SUnits.size());
DFSResult->compute(SUnits);
ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
// This only applies to single-block loops.
if (!BB->isSuccessor(BB))
return 0;
unsigned MaxCyclicLatency = 0;
// Visit each live out vreg def to find def/use pairs that cross iterations.
for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
Register Reg = P.RegUnit;
if (!Register::isVirtualRegister(Reg))
continue;
const LiveInterval &LI = LIS->getInterval(Reg);
const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
if (!DefVNI)
continue;
MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
const SUnit *DefSU = getSUnit(DefMI);
if (!DefSU)
continue;
unsigned LiveOutHeight = DefSU->getHeight();
unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
// Visit all local users of the vreg def.
for (const VReg2SUnit &V2SU
: make_range(VRegUses.find(Reg), VRegUses.end())) {
SUnit *SU = V2SU.SU;
if (SU == &ExitSU)
continue;
// Only consider uses of the phi.
LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
if (!LRQ.valueIn()->isPHIDef())
continue;
// Assume that a path spanning two iterations is a cycle, which could
// overestimate in strange cases. This allows cyclic latency to be
// estimated as the minimum slack of the vreg's depth or height.
unsigned CyclicLatency = 0;
if (LiveOutDepth > SU->getDepth())
CyclicLatency = LiveOutDepth - SU->getDepth();
unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
if (LiveInHeight > LiveOutHeight) {
if (LiveInHeight - LiveOutHeight < CyclicLatency)
CyclicLatency = LiveInHeight - LiveOutHeight;
} else
CyclicLatency = 0;
LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
<< SU->NodeNum << ") = " << CyclicLatency << "c\n");
if (CyclicLatency > MaxCyclicLatency)
MaxCyclicLatency = CyclicLatency;
}
}
LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
return MaxCyclicLatency;
}
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
ArrayRef<SUnit*> BotRoots) {
ScheduleDAGMI::initQueues(TopRoots, BotRoots);
if (ShouldTrackPressure) {
assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
TopRPTracker.setPos(CurrentTop);
}
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
// Move the instruction to its new location in the instruction stream.
MachineInstr *MI = SU->getInstr();
if (IsTopNode) {
assert(SU->isTopReady() && "node still has unscheduled dependencies");
if (&*CurrentTop == MI)
CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
else {
moveInstruction(MI, CurrentTop);
TopRPTracker.setPos(MI);
}
if (ShouldTrackPressure) {
// Update top scheduled pressure.
RegisterOperands RegOpers;
RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
} else {
// Adjust for missing dead-def flags.
RegOpers.detectDeadDefs(*MI, *LIS);
}
TopRPTracker.advance(RegOpers);
assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
TopRPTracker.getRegSetPressureAtPos(), TRI););
updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
}
} else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
MachineBasicBlock::iterator priorII =
priorNonDebug(CurrentBottom, CurrentTop);
if (&*priorII == MI)
CurrentBottom = priorII;
else {
if (&*CurrentTop == MI) {
CurrentTop = nextIfDebug(++CurrentTop, priorII);
TopRPTracker.setPos(CurrentTop);
}
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
BotRPTracker.setPos(CurrentBottom);
}
if (ShouldTrackPressure) {
RegisterOperands RegOpers;
RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
} else {
// Adjust for missing dead-def flags.
RegOpers.detectDeadDefs(*MI, *LIS);
}
if (BotRPTracker.getPos() != CurrentBottom)
BotRPTracker.recedeSkipDebugValues();
SmallVector<RegisterMaskPair, 8> LiveUses;
BotRPTracker.recede(RegOpers, &LiveUses);
assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
BotRPTracker.getRegSetPressureAtPos(), TRI););
updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
updatePressureDiffs(LiveUses);
}
}
}
//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//
namespace {
/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
struct MemOpInfo {
SUnit *SU;
SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset;
unsigned Width;
MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
int64_t Offset, unsigned Width)
: SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
Width(Width) {}
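// Strict weak ordering on base operands: compare by operand kind first, then
// by register number for register bases or by frame index for frame-index
// bases (reversed when the stack grows down).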
static bool Compare(const MachineOperand *const &A,
const MachineOperand *const &B) {
if (A->getType() != B->getType())
return A->getType() < B->getType();
if (A->isReg())
return A->getReg() < B->getReg();
if (A->isFI()) {
const MachineFunction &MF = *A->getParent()->getParent()->getParent();
const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
bool StackGrowsDown = TFI.getStackGrowthDirection() ==
TargetFrameLowering::StackGrowsDown;
return StackGrowsDown ? A->getIndex() > B->getIndex()
: A->getIndex() < B->getIndex();
}
llvm_unreachable("MemOpClusterMutation only supports register or frame "
"index bases.");
}
bool operator<(const MemOpInfo &RHS) const {
// FIXME: Don't compare everything twice. Maybe use C++20 three way
// comparison instead when it's available.
if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
RHS.BaseOps.begin(), RHS.BaseOps.end(),
Compare))
return true;
if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
BaseOps.begin(), BaseOps.end(), Compare))
return false;
if (Offset != RHS.Offset)
return Offset < RHS.Offset;
return SU->NodeNum < RHS.SU->NodeNum;
}
};
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
bool IsLoad;
public:
BaseMemOpClusterMutation(const TargetInstrInfo *tii,
const TargetRegisterInfo *tri, bool IsLoad)
: TII(tii), TRI(tri), IsLoad(IsLoad) {}
void apply(ScheduleDAGInstrs *DAGInstrs) override;
protected:
void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
ScheduleDAGInstrs *DAG);
void collectMemOpRecords(std::vector<SUnit> &SUnits,
SmallVectorImpl<MemOpInfo> &MemOpRecords);
bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
};
class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
StoreClusterMutation(const TargetInstrInfo *tii,
const TargetRegisterInfo *tri)
: BaseMemOpClusterMutation(tii, tri, false) {}
};
class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
: BaseMemOpClusterMutation(tii, tri, true) {}
};
} // end anonymous namespace
namespace llvm {
std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
: nullptr;
}
std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
: nullptr;
}
} // end namespace llvm
// Sort all the loads/stores first. Then, for each load/store, check the
// following loads/stores one by one until the first non-dependent one is
// found, and call the target hook to see if the pair can be clustered.
// If FastCluster is enabled, we assume that all the loads/stores have been
// preprocessed and no longer have dependencies on each other.
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
ScheduleDAGInstrs *DAG) {
// Keep track of the current cluster length and bytes for each SUnit.
DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;
// At this point, the `MemOpRecords` array must hold at least two mem ops. Try
// to cluster the mem ops collected within the `MemOpRecords` array.
for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
// The decision to cluster mem ops is made based on target-dependent logic.
auto MemOpa = MemOpRecords[Idx];
// Seek the next load/store to cluster with.
unsigned NextIdx = Idx + 1;
for (; NextIdx < End; ++NextIdx)
// Skip if MemOpb has already been clustered or has a dependency on
// MemOpa.
if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
(FastCluster ||
(!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
!DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
break;
if (NextIdx == End)
continue;
auto MemOpb = MemOpRecords[NextIdx];
unsigned ClusterLength = 2;
unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
CurrentClusterBytes =
SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
}
if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
CurrentClusterBytes))
continue;
SUnit *SUa = MemOpa.SU;
SUnit *SUb = MemOpb.SU;
if (SUa->NodeNum > SUb->NodeNum)
std::swap(SUa, SUb);
// FIXME: Is this check really required?
if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
continue;
LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
<< SUb->NodeNum << ")\n");
++NumClustered;
if (IsLoad) {
// Copy successor edges from SUa to SUb. Interleaving computation
// dependent on SUa can prevent load combining due to register reuse.
// Predecessor edges do not need to be copied from SUb to SUa since
// nearby loads should have effectively the same inputs.
for (const SDep &Succ : SUa->Succs) {
if (Succ.getSUnit() == SUb)
continue;
LLVM_DEBUG(dbgs() << " Copy Succ SU(" << Succ.getSUnit()->NodeNum
<< ")\n");
DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
}
} else {
// Copy predecessor edges from SUb to SUa to avoid the SUnits that SUb
// depends on being scheduled in between SUb and SUa. Successor edges do
// not need to be copied from SUa to SUb since no one will depend on
// stores.
// Note that we don't need to care about memory dependencies: we won't
// try to cluster two mem ops if they have any memory dependency.
for (const SDep &Pred : SUb->Preds) {
if (Pred.getSUnit() == SUa)
continue;
LLVM_DEBUG(dbgs() << " Copy Pred SU(" << Pred.getSUnit()->NodeNum
<< ")\n");
DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
}
}
SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
CurrentClusterBytes};
LLVM_DEBUG(dbgs() << " Curr cluster length: " << ClusterLength
<< ", Curr cluster bytes: " << CurrentClusterBytes
<< "\n");
}
}
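// Record each SUnit whose instruction may load (or store, for the store
// variant of this mutation) and for which the target can report base
// operands, an offset and a width; these are the clustering candidates.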
void BaseMemOpClusterMutation::collectMemOpRecords(
std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
for (auto &SU : SUnits) {
if ((IsLoad && !SU.getInstr()->mayLoad()) ||
(!IsLoad && !SU.getInstr()->mayStore()))
continue;
const MachineInstr &MI = *SU.getInstr();
SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset;
bool OffsetIsScalable;
unsigned Width;
if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
OffsetIsScalable, Width, TRI)) {
MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));
LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
<< Offset << ", OffsetIsScalable: " << OffsetIsScalable
<< ", Width: " << Width << "\n");
}
#ifndef NDEBUG
for (auto *Op : BaseOps)
assert(Op);
#endif
}
}
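// Partition the collected mem ops into groups that are clustered
// independently. In fast-cluster mode (forced on the command line or chosen
// when the DAG is large), ops are grouped by their non-artificial ctrl
// predecessor; otherwise all ops go into a single group. Returns true if
// fast-cluster mode was used.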
bool BaseMemOpClusterMutation::groupMemOps(
ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
bool FastCluster =
ForceFastCluster ||
MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;
for (const auto &MemOp : MemOps) {
unsigned ChainPredID = DAG->SUnits.size();
if (FastCluster) {
for (const SDep &Pred : MemOp.SU->Preds) {
// We only want to cluster the mem ops that have the same ctrl (non-data)
// pred so that they don't have a ctrl dependency on each other. But for
// store instrs, we can still cluster them if the pred is a load instr.
if ((Pred.isCtrl() &&
(IsLoad ||
(Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
!Pred.isArtificial()) {
ChainPredID = Pred.getSUnit()->NodeNum;
break;
}
}
} else
ChainPredID = 0;
Groups[ChainPredID].push_back(MemOp);
}
return FastCluster;
}
/// Callback from DAG postProcessing to create cluster edges for loads/stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
// Collect all the clusterable loads/stores
SmallVector<MemOpInfo, 32> MemOpRecords;
collectMemOpRecords(DAG->SUnits, MemOpRecords);
if (MemOpRecords.size() < 2)
return;
// If the DAG is too complex, use a heuristic to put loads/stores without
// dependencies into the same group, to avoid a compile-time blowup.
// Note that some fusion pairs could be lost with this.
DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);
for (auto &Group : Groups) {
// Sort the loads/stores so that we can stop clustering as early as
// possible.
llvm::sort(Group.second);
// Try to cluster all the neighboring loads/stores.
clusterNeighboringMemOps(Group.second, FastCluster, DAG);
}
}
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//
namespace {
/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
// Transient state.
SlotIndex RegionBeginIdx;
// RegionEndIdx is the slot index of the last non-debug instruction in the
// scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
SlotIndex RegionEndIdx;
public:
CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
void apply(ScheduleDAGInstrs *DAGInstrs) override;
protected:
void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // end anonymous namespace
namespace llvm {
std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
return std::make_unique<CopyConstrain>(TII, TRI);
}
} // end namespace llvm
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0: = dst
/// I1: src = ...
/// I2: = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1: = dst
/// I2: src = ...
/// I3: = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
LiveIntervals *LIS = DAG->getLIS();
MachineInstr *Copy = CopySU->getInstr();
// Check for pure vreg copies.
const MachineOperand &SrcOp = Copy->getOperand(1);
Register SrcReg = SrcOp.getReg();
if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
return;
const MachineOperand &DstOp = Copy->getOperand(0);
Register DstReg = DstOp.getReg();
if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
return;
// Check if either the dest or source is local. If it's live across a back
// edge, it's not local. Note that if both vregs are live across the back
// edge, we cannot successfully constrain the copy without cyclic scheduling.
// If both the copy's source and dest are local live intervals, then we
// should treat the dest as the global for the purpose of adding
// constraints. This adds edges from source's other uses to the copy.
unsigned LocalReg = SrcReg;
unsigned GlobalReg = DstReg;
LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
LocalReg = DstReg;
GlobalReg = SrcReg;
LocalLI = &LIS->getInterval(LocalReg);
if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
return;
}
LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
// Find the global segment after the start of the local LI.
LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
// If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
// local live range. We could create edges from other global uses to the local
// start, but the coalescer should have already eliminated these cases, so
// don't bother dealing with them.
if (GlobalSegment == GlobalLI->end())
return;
// If GlobalSegment is killed at the LocalLI->start, the call to find()
// returned the next global segment. But if GlobalSegment overlaps with
// LocalLI->start, then advance to the next segment. If a hole in GlobalLI
// exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
if (GlobalSegment->contains(LocalLI->beginIndex()))
++GlobalSegment;
if (GlobalSegment == GlobalLI->end())
return;
// Check if GlobalLI contains a hole in the vicinity of LocalLI.
if (GlobalSegment != GlobalLI->begin()) {
// Two address defs have no hole.
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
GlobalSegment->start)) {
return;
}
// If the prior global segment may be defined by the same two-address
// instruction that also defines LocalLI, then we can't make a hole here.
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
LocalLI->beginIndex())) {
return;
}
// If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
// it would be a disconnected component in the live range.
assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
"Disconnected LRG within the scheduling region.");
}
MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
if (!GlobalDef)
return;
SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
if (!GlobalSU)
return;
// GlobalDef is the bottom of the GlobalLI hole. Open the hole by
// constraining the uses of the last local def to precede GlobalDef.
SmallVector<SUnit*,8> LocalUses;
const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
for (const SDep &Succ : LastLocalSU->Succs) {
if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
continue;
if (Succ.getSUnit() == GlobalSU)
continue;
if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
return;
LocalUses.push_back(Succ.getSUnit());
}
// Open the top of the GlobalLI hole by constraining any earlier global uses
// to precede the start of LocalLI.
SmallVector<SUnit*,8> GlobalUses;
MachineInstr *FirstLocalDef =
LIS->getInstructionFromIndex(LocalLI->beginIndex());
SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
for (const SDep &Pred : GlobalSU->Preds) {
if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
continue;
if (Pred.getSUnit() == FirstLocalSU)
continue;
if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
return;
GlobalUses.push_back(Pred.getSUnit());
}
LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
// Add the weak edges.
for (SUnit *LU : LocalUses) {
LLVM_DEBUG(dbgs() << " Local use SU(" << LU->NodeNum << ") -> SU("
<< GlobalSU->NodeNum << ")\n");
DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
}
for (SUnit *GU : GlobalUses) {
LLVM_DEBUG(dbgs() << " Global use SU(" << GU->NodeNum << ") -> SU("
<< FirstLocalSU->NodeNum << ")\n");
DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
}
}
/// Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
if (FirstPos == DAG->end())
return;
RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
RegionEndIdx = DAG->getLIS()->getInstructionIndex(
*priorNonDebug(DAG->end(), DAG->begin()));
for (SUnit &SU : DAG->SUnits) {
if (!SU.getInstr()->isCopy())
continue;
constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
}
}
//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//
static const unsigned InvalidCycle = ~0U;
SchedBoundary::~SchedBoundary() { delete HazardRec; }
/// Given a Count of resource usage and a Latency value, return true if a
/// SchedBoundary becomes resource limited.
/// If we are checking after scheduling a node, we should return true when
/// we just reach the resource limit.
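///
/// For example (illustrative): with LFactor == 2, Latency == 3 and
/// Count == 8, ResCntFactor == 8 - 3*2 == 2 == LFactor, so the boundary is
/// reported as resource limited only when checking after scheduling a node.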
static bool checkResourceLimit(unsigned LFactor, unsigned Count,
unsigned Latency, bool AfterSchedNode) {
int ResCntFactor = (int)(Count - (Latency * LFactor));
if (AfterSchedNode)
return ResCntFactor >= (int)LFactor;
else
return ResCntFactor > (int)LFactor;
}
void SchedBoundary::reset() {
// A new HazardRec is created for each DAG and owned by SchedBoundary.
// Destroying and reconstructing it is very expensive though. So keep
// invalid, placeholder HazardRecs.
if (HazardRec && HazardRec->isEnabled()) {
delete HazardRec;
HazardRec = nullptr;
}
Available.clear();
Pending.clear();
CheckPending = false;
CurrCycle = 0;
CurrMOps = 0;
MinReadyCycle = std::numeric_limits<unsigned>::max();
ExpectedLatency = 0;
DependentLatency = 0;
RetiredMOps = 0;
MaxExecutedResCount = 0;
ZoneCritResIdx = 0;
IsResourceLimited = false;
ReservedCycles.clear();
ReservedCyclesIndex.clear();
ResourceGroupSubUnitMasks.clear();
#ifndef NDEBUG
// Track the maximum number of stall cycles that could arise either from the
// latency of a DAG edge or the number of cycles that a processor resource is
// reserved (SchedBoundary::ReservedCycles).
MaxObservedStall = 0;
#endif
// Reserve a zero-count for invalid CritResIdx.
ExecutedResCounts.resize(1);
assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
reset();
if (!SchedModel->hasInstrSchedModel())
return;
RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
for (SUnit &SU : DAG->SUnits) {
const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
* SchedModel->getMicroOpFactor();
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned PIdx = PI->ProcResourceIdx;
unsigned Factor = SchedModel->getResourceFactor(PIdx);
RemainingCounts[PIdx] += (Factor * PI->Cycles);
}
}
}
void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
reset();
DAG = dag;
SchedModel = smodel;
Rem = rem;
if (SchedModel->hasInstrSchedModel()) {
unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
ReservedCyclesIndex.resize(ResourceCount);
ExecutedResCounts.resize(ResourceCount);
ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
unsigned NumUnits = 0;
for (unsigned i = 0; i < ResourceCount; ++i) {
ReservedCyclesIndex[i] = NumUnits;
NumUnits += SchedModel->getProcResource(i)->NumUnits;
if (isUnbufferedGroup(i)) {
auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
U != UE; ++U)
ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
}
}
ReservedCycles.resize(NumUnits, InvalidCycle);
}
}
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
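///
/// For example (illustrative): if an unbuffered SU becomes ready at cycle 7
/// while the zone is at CurrCycle 4, this returns a 3-cycle soft stall.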
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
if (!SU->isUnbuffered)
return 0;
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
if (ReadyCycle > CurrCycle)
return ReadyCycle - CurrCycle;
return 0;
}
/// Compute the next cycle at which the given processor resource unit
/// can be scheduled.
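///
/// For example (illustrative): if an instance was last reserved until cycle
/// 5, a 2-cycle operation scheduled bottom-up gets cycle 7 back, while
/// top-down it gets cycle 5; an instance that has never been used always
/// reports cycle 0.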
unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
unsigned Cycles) {
unsigned NextUnreserved = ReservedCycles[InstanceIdx];
// If this resource has never been used, always return cycle zero.
if (NextUnreserved == InvalidCycle)
return 0;
// For bottom-up scheduling add the cycles needed for the current operation.
if (!isTop())
NextUnreserved += Cycles;
return NextUnreserved;
}
/// Compute the next cycle at which the given processor resource can be
/// scheduled. Returns the next cycle and the index of the processor resource
/// instance in the reserved cycles vector.
std::pair<unsigned, unsigned>
SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
unsigned Cycles) {
unsigned MinNextUnreserved = InvalidCycle;
unsigned InstanceIdx = 0;
unsigned StartIndex = ReservedCyclesIndex[PIdx];
unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
assert(NumberOfInstances > 0 &&
"Cannot have zero instances of a ProcResource");
if (isUnbufferedGroup(PIdx)) {
// If any subunits are used by the instruction, report that the resource
// group is available at 0, effectively removing the group record from
// hazarding and basing the hazarding decisions on the subunit records.
// Otherwise, choose the first available instance from among the subunits.
// Specifications which assign cycles to both the subunits and the group or
// which use an unbuffered group with buffered subunits will appear to
// schedule strangely. In the first case, the additional cycles for the
// group will be ignored. In the second, the group will be ignored
// entirely.
for (const MCWriteProcResEntry &PE :
make_range(SchedModel->getWriteProcResBegin(SC),
SchedModel->getWriteProcResEnd(SC)))
if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
return std::make_pair(0u, StartIndex);
auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
unsigned NextUnreserved, NextInstanceIdx;
std::tie(NextUnreserved, NextInstanceIdx) =
getNextResourceCycle(SC, SubUnits[I], Cycles);
if (MinNextUnreserved > NextUnreserved) {
InstanceIdx = NextInstanceIdx;
MinNextUnreserved = NextUnreserved;
}
}
return std::make_pair(MinNextUnreserved, InstanceIdx);
}
for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
++I) {
unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles);
if (MinNextUnreserved > NextUnreserved) {
InstanceIdx = I;
MinNextUnreserved = NextUnreserved;
}
}
return std::make_pair(MinNextUnreserved, InstanceIdx);
}
/// Does this SU have a hazard within the current instruction group?
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
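///
/// For example (illustrative): with an issue width of 4, an SU needing 3
/// micro-ops is reported as a hazard once 2 micro-ops have already issued in
/// the current group, since 2 + 3 > 4.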
bool SchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled()
&& HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
return true;
}
unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
<< SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
}
if (CurrMOps > 0 &&
((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
(!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
<< (isTop() ? "begin" : "end") << " group\n");
return true;
}
if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
for (const MCWriteProcResEntry &PE :
make_range(SchedModel->getWriteProcResBegin(SC),
SchedModel->getWriteProcResEnd(SC))) {
unsigned ResIdx = PE.ProcResourceIdx;
unsigned Cycles = PE.Cycles;
unsigned NRCycle, InstanceIdx;
std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(SC, ResIdx, Cycles);
if (NRCycle > CurrCycle) {
#ifndef NDEBUG
MaxObservedStall = std::max(Cycles, MaxObservedStall);
#endif
LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
<< SchedModel->getResourceName(ResIdx)
<< '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
<< "=" << NRCycle << "c\n");
return true;
}
}
}
return false;
}
// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
SUnit *LateSU = nullptr;
unsigned RemLatency = 0;
for (SUnit *SU : ReadySUs) {
unsigned L = getUnscheduledLatency(SU);
if (L > RemLatency) {
RemLatency = L;
LateSU = SU;
}
}
if (LateSU) {
LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
<< LateSU->NodeNum << ") " << RemLatency << "c\n");
}
return RemLatency;
}
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
OtherCritIdx = 0;
if (!SchedModel->hasInstrSchedModel())
return 0;
unsigned OtherCritCount = Rem->RemIssueCount
+ (RetiredMOps * SchedModel->getMicroOpFactor());
LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
<< OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
PIdx != PEnd; ++PIdx) {
unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
if (OtherCount > OtherCritCount) {
OtherCritCount = OtherCount;
OtherCritIdx = PIdx;
}
}
if (OtherCritIdx) {
LLVM_DEBUG(
dbgs() << " " << Available.getName() << " + Remain CritRes: "
<< OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
<< " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
}
return OtherCritCount;
}
void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
unsigned Idx) {
assert(SU->getInstr() && "Scheduled SUnit must have instr");
#ifndef NDEBUG
// ReadyCycle was bumped up to CurrCycle when this node was scheduled, but
// CurrCycle may have been eagerly advanced immediately after scheduling, so
// it may now be greater than ReadyCycle.
if (ReadyCycle > CurrCycle)
MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
// Check for interlocks first. For the purpose of other heuristics, an
// instruction that cannot issue appears as if it's not in the ReadyQueue.
bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
checkHazard(SU) || (Available.size() >= ReadyListLimit);
if (!HazardDetected) {
Available.push(SU);
if (InPQueue)
Pending.remove(Pending.begin() + Idx);
return;
}
if (!InPQueue)
Pending.push(SU);
}
/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
if (SchedModel->getMicroOpBufferSize() == 0) {
assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
"MinReadyCycle uninitialized");
if (MinReadyCycle > NextCycle)
NextCycle = MinReadyCycle;
}
// Update the current micro-ops, which will issue in the next cycle.
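// For example (illustrative): with an issue width of 4 and a 2-cycle
// advance, DecMOps is 8, so any current group of up to 8 micro-ops drains
// to zero.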
unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
// Decrement DependentLatency based on the next cycle.
if ((NextCycle - CurrCycle) > DependentLatency)
DependentLatency = 0;
else
DependentLatency -= (NextCycle - CurrCycle);
if (!HazardRec->isEnabled()) {
// Bypass HazardRec virtual calls.
CurrCycle = NextCycle;
} else {
// Bypass getHazardType calls in case of long latency.
for (; CurrCycle != NextCycle; ++CurrCycle) {
if (isTop())
HazardRec->AdvanceCycle();
else
HazardRec->RecedeCycle();
}
}
CheckPending = true;
IsResourceLimited =
checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
getScheduledLatency(), true);
LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
<< '\n');
}
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
ExecutedResCounts[PIdx] += Count;
if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
MaxExecutedResCount = ExecutedResCounts[PIdx];
}
/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
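///
/// For example (illustrative): a resource with factor 2 consumed for 3
/// cycles adds 6 scaled units to the executed counts and subtracts the same
/// amount from the remaining counts.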
unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
unsigned Cycles, unsigned NextCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
unsigned Count = Factor * Cycles;
LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +"
<< Cycles << "x" << Factor << "u\n");
// Update Executed resources counts.
incExecutedResources(PIdx, Count);
assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
Rem->RemainingCounts[PIdx] -= Count;
// Check if this resource exceeds the current critical resource. If so, it
// becomes the critical resource.
if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
ZoneCritResIdx = PIdx;
LLVM_DEBUG(dbgs() << " *** Critical resource "
<< SchedModel->getResourceName(PIdx) << ": "
<< getResourceCount(PIdx) / SchedModel->getLatencyFactor()
<< "c\n");
}
// For reserved resources, record the highest cycle using the resource.
unsigned NextAvailable, InstanceIdx;
std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(SC, PIdx, Cycles);
if (NextAvailable > CurrCycle) {
LLVM_DEBUG(dbgs() << " Resource conflict: "
<< SchedModel->getResourceName(PIdx)
<< '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
<< " reserved until @" << NextAvailable << "\n");
}
return NextAvailable;
}
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
if (HazardRec->isEnabled()) {
if (!isTop() && SU->isCall) {
// Calls are scheduled with their preceding instructions. For bottom-up
// scheduling, clear the pipeline state before emitting.
HazardRec->Reset();
}
HazardRec->EmitInstruction(SU);
// Scheduling an instruction may have made pending instructions available.
CheckPending = true;
}
// checkHazard should prevent scheduling multiple instructions per cycle that
// exceed the issue width.
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
assert(
(CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
"Cannot schedule this instruction's MicroOps in the current cycle.");
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
unsigned NextCycle = CurrCycle;
switch (SchedModel->getMicroOpBufferSize()) {
case 0:
assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
break;
case 1:
if (ReadyCycle > NextCycle) {
NextCycle = ReadyCycle;
LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
}
break;
default:
// We don't currently model the OOO reorder buffer, so consider all
// scheduled MOps to be "retired". We do loosely model in-order resource
// latency. If this instruction uses an in-order resource, account for any
// likely stall cycles.
if (SU->isUnbuffered && ReadyCycle > NextCycle)
NextCycle = ReadyCycle;
break;
}
RetiredMOps += IncMOps;
// Update resource counts and critical resource.
if (SchedModel->hasInstrSchedModel()) {
unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
Rem->RemIssueCount -= DecRemIssue;
if (ZoneCritResIdx) {
// Scale scheduled micro-ops for comparing with the critical resource.
unsigned ScaledMOps =
RetiredMOps * SchedModel->getMicroOpFactor();
// If scaled micro-ops are now more than the previous critical resource by
// a full cycle, then micro-ops issue becomes critical.
if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
>= (int)SchedModel->getLatencyFactor()) {
ZoneCritResIdx = 0;
LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
<< ScaledMOps / SchedModel->getLatencyFactor()
<< "c\n");
}
}
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned RCycle =
countResource(SC, PI->ProcResourceIdx, PI->Cycles, NextCycle);
if (RCycle > NextCycle)
NextCycle = RCycle;
}
if (SU->hasReservedResource) {
// For reserved resources, record the highest cycle using the resource.
// For top-down scheduling, this is the cycle in which we schedule this
// instruction plus the number of cycles the operation reserves the
// resource. For bottom-up scheduling, it is simply the instruction's cycle.
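// For example (illustrative): scheduling top-down at NextCycle 3 an op that
// holds the unit for 2 cycles records the instance as reserved until cycle
// 5 (or later, if it was already reserved beyond that).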
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned PIdx = PI->ProcResourceIdx;
if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
unsigned ReservedUntil, InstanceIdx;
std::tie(ReservedUntil, InstanceIdx) =
getNextResourceCycle(SC, PIdx, 0);
if (isTop()) {
ReservedCycles[InstanceIdx] =
std::max(ReservedUntil, NextCycle + PI->Cycles);
} else
ReservedCycles[InstanceIdx] = NextCycle;
}
}
}
}
// Update ExpectedLatency and DependentLatency.
unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
if (SU->getDepth() > TopLatency) {
TopLatency = SU->getDepth();
LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU("
<< SU->NodeNum << ") " << TopLatency << "c\n");
}
if (SU->getHeight() > BotLatency) {
BotLatency = SU->getHeight();
LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU("
<< SU->NodeNum << ") " << BotLatency << "c\n");
}
// If we stall for any reason, bump the cycle.
if (NextCycle > CurrCycle)
bumpCycle(NextCycle);
else
// After updating ZoneCritResIdx and ExpectedLatency, check if we're
// resource limited. If a stall occurred, bumpCycle does this.
IsResourceLimited =
checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
getScheduledLatency(), true);
// Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
// resets CurrMOps. Loop to handle instructions with more MOps than can issue
// in
// one cycle. Since we commonly reach the max MOps here, opportunistically
// bump the cycle to avoid uselessly checking everything in the readyQ.
CurrMOps += IncMOps;
// Bump the cycle count for issue group constraints.
// This must be done after NextCycle has been adjusted for all other stalls.
// Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
// CurrCycle to X.
if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
(!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin")
<< " group\n");
bumpCycle(++NextCycle);
}
while (CurrMOps >= SchedModel->getIssueWidth()) {
LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle "
<< CurrCycle << '\n');
bumpCycle(++NextCycle);
}
LLVM_DEBUG(dumpScheduledState());
}
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = std::numeric_limits<unsigned>::max();
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
SUnit *SU = *(Pending.begin() + I);
unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
if (Available.size() >= ReadyListLimit)
break;
releaseNode(SU, ReadyCycle, true, I);
if (E != Pending.size()) {
--I;
--E;
}
}
CheckPending = false;
}
/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
assert(Pending.isInQueue(SU) && "bad ready count");
Pending.remove(Pending.find(SU));
}
}
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
// Defer any ready instrs that now have a hazard.
for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
if (checkHazard(*I)) {
Pending.push(*I);
I = Available.remove(I);
continue;
}
++I;
}
for (unsigned i = 0; Available.empty(); ++i) {
// FIXME: Re-enable assert once PR20057 is resolved.
// assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
// "permanent hazard");
(void)i;
bumpCycle(CurrCycle + 1);
releasePending();
}
LLVM_DEBUG(Pending.dump());
LLVM_DEBUG(Available.dump());
if (Available.size() == 1)
return *Available.begin();
return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
unsigned ResFactor;
unsigned ResCount;
if (ZoneCritResIdx) {
ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
ResCount = getResourceCount(ZoneCritResIdx);
} else {
ResFactor = SchedModel->getMicroOpFactor();
ResCount = RetiredMOps * ResFactor;
}
unsigned LFactor = SchedModel->getLatencyFactor();
dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
<< " Retired: " << RetiredMOps;
dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
<< ResCount / ResFactor << " "
<< SchedModel->getResourceName(ZoneCritResIdx)
<< "\n ExpectedLatency: " << ExpectedLatency << "c\n"
<< (IsResourceLimited ? " - Resource" : " - Latency")
<< " limited.\n";
}
#endif
//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//
void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel) {
if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
return;
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
if (PI->ProcResourceIdx == Policy.ReduceResIdx)
ResDelta.CritResources += PI->Cycles;
if (PI->ProcResourceIdx == Policy.DemandResIdx)
ResDelta.DemandedResources += PI->Cycles;
}
}
/// Compute remaining latency. We need this both to determine whether the
/// overall schedule has become latency-limited and whether the instructions
/// outside this zone are resource or latency limited.
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
/// DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
/// ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
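///
/// For example (illustrative): if the dependent latency currently stands at
/// 6 cycles and the deepest node in Available or Pending has an unscheduled
/// latency of 8, RemainingLatency is max(6, 8) == 8.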
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
unsigned RemLatency = CurrZone.getDependentLatency();
RemLatency = std::max(RemLatency,
CurrZone.findMaxLatency(CurrZone.Available.elements()));
RemLatency = std::max(RemLatency,
CurrZone.findMaxLatency(CurrZone.Pending.elements()));
return RemLatency;
}
/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
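///
/// For example (illustrative): with CriticalPath == 20, CurrCycle == 8 and a
/// remaining latency of 15, 8 + 15 > 20, so the policy should reduce latency.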
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
SchedBoundary &CurrZone,
bool ComputeRemLatency,
unsigned &RemLatency) const {
// The current cycle is already greater than the critical path, so we are
// already latency limited and don't need to compute the remaining latency.
if (CurrZone.getCurrCycle() > Rem.CriticalPath)
return true;
// If we haven't scheduled anything yet, then we aren't latency limited.
if (CurrZone.getCurrCycle() == 0)
return false;
if (ComputeRemLatency)
RemLatency = computeRemLatency(CurrZone);
return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}
/// Set the CandPolicy for a scheduling zone, given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
SchedBoundary &CurrZone,
SchedBoundary *OtherZone) {
// Apply preemptive heuristics based on the total latency and resources
// inside and outside this zone. Potential stalls should be considered before
// following this policy.
// Compute the critical resource outside the zone.
unsigned OtherCritIdx = 0;
unsigned OtherCount =
OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
bool OtherResLimited = false;
unsigned RemLatency = 0;
bool RemLatencyComputed = false;
if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
RemLatency = computeRemLatency(CurrZone);
RemLatencyComputed = true;
OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
OtherCount, RemLatency, false);
}
// Schedule aggressively for latency in PostRA mode. We don't check for
// acyclic latency during PostRA, and highly out-of-order processors will
// skip PostRA scheduling.
if (!OtherResLimited &&
(IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
RemLatency))) {
Policy.ReduceLatency |= true;
LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName()
<< " RemainingLatency " << RemLatency << " + "
<< CurrZone.getCurrCycle() << "c > CritPath "
<< Rem.CriticalPath << "\n");
}
// If the same resource is limiting inside and outside the zone, do nothing.
if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
return;
LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
<< SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
} if (OtherResLimited) dbgs()
<< " RemainingLimit: "
<< SchedModel->getResourceName(OtherCritIdx) << "\n";
if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
<< " Latency limited both directions.\n");
if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
if (OtherResLimited)
Policy.DemandResIdx = OtherCritIdx;
}
#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
GenericSchedulerBase::CandReason Reason) {
switch (Reason) {
case NoCand: return "NOCAND ";
case Only1: return "ONLY1 ";
case PhysReg: return "PHYS-REG ";
case RegExcess: return "REG-EXCESS";
case RegCritical: return "REG-CRIT ";
case Stall: return "STALL ";
case Cluster: return "CLUSTER ";
case Weak: return "WEAK ";
case RegMax: return "REG-MAX ";
case ResourceReduce: return "RES-REDUCE";
case ResourceDemand: return "RES-DEMAND";
case TopDepthReduce: return "TOP-DEPTH ";
case TopPathReduce: return "TOP-PATH "<