MachineFunction: Return reference from getFunction(); NFC
The Function attached to a MachineFunction can never be nullptr, so getFunction() can return a reference instead of a pointer; callers are updated accordingly.
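A minimal sketch of the interface change (assuming the surrounding
class shape; the real declaration lives in
include/llvm/CodeGen/MachineFunction.h):

    class MachineFunction {
      const Function &F; // bound at construction, so never null
    public:
      // Before: const Function *getFunction() const;
      const Function &getFunction() const { return F; }
    };

Callers drop the dereference, or take the address where a pointer is
still required (e.g. for maps keyed on const Function *):

    if (skipFunction(MF.getFunction()))   // was: skipFunction(*MF.getFunction())
      return false;
    CallGraphResourceInfo[&MF.getFunction()]; // was: CallGraphResourceInfo[MF.getFunction()]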
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320884 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 1135f0f..38a7e33 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -308,7 +308,7 @@
//===----------------------------------------------------------------------===//
bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
if (!F.getSubtarget<AArch64Subtarget>().balanceFPOps())
diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 7da56ef..338daec 100644
--- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -393,7 +393,7 @@
bool Changed = false;
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
MRI = &mf.getRegInfo();
diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp
index 8383058..08152c0 100644
--- a/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -220,7 +220,7 @@
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
@@ -322,7 +322,7 @@
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index bb750c5..b88fba4 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -42,7 +42,7 @@
}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp
index d9d48e9..0a9167e 100644
--- a/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -482,7 +482,7 @@
}
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
diff --git a/lib/Target/AArch64/AArch64CondBrTuning.cpp b/lib/Target/AArch64/AArch64CondBrTuning.cpp
index 6fc5762..30cefba 100644
--- a/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -290,7 +290,7 @@
}
bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index f765825..d14bde3 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -327,7 +327,7 @@
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index f7c9711..b0bda7c 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -924,7 +924,7 @@
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
@@ -936,7 +936,7 @@
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction()->optForMinSize();
+ MinSize = MF.getFunction().optForMinSize();
bool Changed = false;
CmpConv.runOnMachineFunction(MF, MBPI);
diff --git a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 0298c76..8e7e740 100644
--- a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -198,7 +198,7 @@
// Scan the function for instructions that have a dead definition of a
// register. Replace that register with the zero register when possible.
bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
diff --git a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
index 7b4ab7c..d1ddb2e 100644
--- a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -798,7 +798,7 @@
if (ST.getProcFamily() != AArch64Subtarget::Falkor)
return false;
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 72330d9..7394435 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -174,7 +174,7 @@
return false;
// Don't use the red zone if the function explicitly asks us not to.
// This is typically used for kernel code.
- if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
+ if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
return false;
const MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -459,13 +459,13 @@
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
+ bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
bool HasFP = hasFP(MF);
// Debug location must be unknown since the first debug location is used
@@ -474,7 +474,7 @@
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
int NumBytes = (int)MFI.getStackSize();
@@ -507,7 +507,7 @@
}
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -716,7 +716,7 @@
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
// Initial and residual are named for consistency with the prologue. Note that
@@ -765,7 +765,7 @@
// it as the 2nd argument of AArch64ISD::TC_RETURN.
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -857,7 +857,7 @@
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
@@ -928,7 +928,7 @@
static bool produceCompactUnwindFrame(MachineFunction &MF) {
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- AttributeList Attrs = MF.getFunction()->getAttributes();
+ AttributeList Attrs = MF.getFunction().getAttributes();
return Subtarget.isTargetMachO() &&
!(Subtarget.getTargetLowering()->supportSwiftError() &&
Attrs.hasAttrSomewhere(Attribute::SwiftError));
@@ -959,7 +959,7 @@
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
MachineFrameInfo &MFI = MF.getFrameInfo();
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
unsigned Count = CSI.size();
(void)CC;
// MachO's compact unwind format relies on all registers being stored in
@@ -1154,7 +1154,7 @@
RegScavenger *RS) const {
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 06005f6..0b10246 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -53,7 +53,7 @@
}
bool runOnMachineFunction(MachineFunction &MF) override {
- ForCodeSize = MF.getFunction()->optForSize();
+ ForCodeSize = MF.getFunction().optForSize();
Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index aaf2811..1242cf5 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2731,7 +2731,7 @@
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2745,7 +2745,7 @@
// we use a special version of AnalyzeFormalArguments to pass in ValVT and
// LocVT.
unsigned NumArgs = Ins.size();
- Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ValVT = Ins[i].VT;
@@ -2935,7 +2935,7 @@
MachineFrameInfo &MFI = MF.getFrameInfo();
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
auto PtrVT = getPointerTy(DAG.getDataLayout());
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
SmallVector<SDValue, 8> MemOps;
@@ -3087,15 +3087,15 @@
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
// Byval parameters hand the function a pointer directly into the stack area
// we want to reuse during a tail call. Working around this *is* possible (see
// X86) but less efficient and uglier in LowerCall.
- for (Function::const_arg_iterator i = CallerF->arg_begin(),
- e = CallerF->arg_end();
+ for (Function::const_arg_iterator i = CallerF.arg_begin(),
+ e = CallerF.arg_end();
i != e; ++i)
if (i->hasByValAttr())
return false;
@@ -4185,7 +4185,7 @@
}
SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat))
return SDValue();
@@ -4668,7 +4668,7 @@
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
return LowerWin64_VASTART(Op, DAG);
else if (Subtarget->isTargetDarwin())
return LowerDarwin_VASTART(Op, DAG);
@@ -7909,9 +7909,9 @@
// instruction to materialize the v2i64 zero and one store (with restrictive
// addressing mode). Just do two i64 store of zero-registers.
bool Fast;
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat) &&
+ !F.hasFnAttribute(Attribute::NoImplicitFloat) &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
(allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
return MVT::f128;
@@ -8156,7 +8156,7 @@
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
@@ -9577,7 +9577,7 @@
return SDValue();
// Don't split at -Oz.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
// Don't split v2i64 vectors. Memcpy lowering produces those and splitting
@@ -10939,7 +10939,7 @@
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index f88c0ac..8d78b5b 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -415,7 +415,7 @@
// Do not merge to float value size (128 bytes) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat)
@@ -444,8 +444,8 @@
}
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 74aee12..e26f15b 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -4753,21 +4753,21 @@
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// If F uses a redzone, then don't outline from it because it might mess up
// the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If anyone is using the address of this function, don't outline from it.
- if (F->hasAddressTaken())
+ if (F.hasAddressTaken())
return false;
// Can F be deduplicated by the linker? If it can, don't outline from it.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
-
+
return true;
}
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index 841265c..79826ca 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -328,10 +328,10 @@
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def ForCodeSize : Predicate<"MF->getFunction()->optForSize()">;
- def NotForCodeSize : Predicate<"!MF->getFunction()->optForSize()">;
+ def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
+ def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
// Avoid generating STRQro if it is slow, unless we're optimizing for code size.
- def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction()->optForSize()">;
+ def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
}
include "AArch64InstrFormats.td"
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index c406228..8a29456 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1759,7 +1759,7 @@
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index 9848083..e5822b1 100644
--- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -485,7 +485,7 @@
bool AArch64RedundantCopyElimination::runOnMachineFunction(
MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 1059bc3..88dd297 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -42,22 +42,22 @@
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF->getFunction().getCallingConv() == CallingConv::GHC)
// GHC set of callee saved regs is empty as all those regs are
// used for passing STG regs around
return CSR_AArch64_NoRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
+ if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
return CSR_AArch64_AllRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS)
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
CSR_AArch64_CXX_TLS_Darwin_SaveList;
if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost)
+ if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_SaveList;
else
return CSR_AArch64_AAPCS_SaveList;
@@ -66,7 +66,7 @@
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -84,7 +84,7 @@
return CSR_AArch64_CXX_TLS_Darwin_RegMask;
if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_RegMask;
if (CC == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_RegMask;
diff --git a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
index 7d43905..e185187 100644
--- a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
+++ b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
@@ -690,7 +690,7 @@
}
bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 78fc322..571e61d 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -120,7 +120,7 @@
}
bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const TargetSubtargetInfo &ST = MF.getSubtarget();
diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index c53235d..bb628b8 100644
--- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -205,7 +205,7 @@
if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
return;
- HSAMetadataStream.emitKernel(*MF->getFunction(),
+ HSAMetadataStream.emitKernel(MF->getFunction(),
getHSACodeProps(*MF, CurrentProgramInfo),
getHSADebugProps(*MF, CurrentProgramInfo));
}
@@ -215,14 +215,14 @@
const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(*MF)) {
SmallString<128> SymbolName;
- getNameWithPrefix(SymbolName, MF->getFunction()),
+ getNameWithPrefix(SymbolName, &MF->getFunction()),
getTargetStreamer()->EmitAMDGPUSymbolType(
SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
}
const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
if (STI.dumpCode()) {
// Disassemble function name label to text.
- DisasmLines.push_back(MF->getFunction()->getName().str() + ":");
+ DisasmLines.push_back(MF->getName().str() + ":");
DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
HexLines.push_back("");
}
@@ -314,7 +314,7 @@
getSIProgramInfo(CurrentProgramInfo, MF);
} else {
auto I = CallGraphResourceInfo.insert(
- std::make_pair(MF.getFunction(), SIFunctionResourceInfo()));
+ std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
SIFunctionResourceInfo &Info = I.first->second;
assert(I.second && "should only be called once per function");
Info = analyzeResourceUsage(MF);
@@ -343,7 +343,7 @@
if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
if (!MFI->isEntryFunction()) {
OutStreamer->emitRawComment(" Function info:", false);
- SIFunctionResourceInfo &Info = CallGraphResourceInfo[MF.getFunction()];
+ SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
emitCommonFunctionComments(
Info.NumVGPR,
Info.getTotalNumSGPRs(MF.getSubtarget<SISubtarget>()),
@@ -469,7 +469,7 @@
unsigned RsrcReg;
if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
// Evergreen / Northern Islands
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
@@ -478,7 +478,7 @@
}
} else {
// R600 / R700
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
@@ -493,7 +493,7 @@
OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
}
@@ -787,9 +787,9 @@
ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;
if (!isUInt<32>(ProgInfo.ScratchSize)) {
- DiagnosticInfoStackSize DiagStackSize(*MF.getFunction(),
+ DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
ProgInfo.ScratchSize, DS_Error);
- MF.getFunction()->getContext().diagnose(DiagStackSize);
+ MF.getFunction().getContext().diagnose(DiagStackSize);
}
const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
@@ -808,8 +808,8 @@
unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
// This can happen due to a compiler bug or when using inline asm.
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(),
"addressable scalar registers",
ProgInfo.NumSGPR, DS_Error,
DK_ResourceLimit,
@@ -836,8 +836,8 @@
if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
// This can happen due to a compiler bug or when using inline asm to use
// the registers which are usually reserved for vcc etc.
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(),
"scalar registers",
ProgInfo.NumSGPR, DS_Error,
DK_ResourceLimit,
@@ -856,15 +856,15 @@
}
if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
MFI->getNumUserSGPRs(), DS_Error);
Ctx.diagnose(Diag);
}
if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
MFI->getLDSSize(), DS_Error);
Ctx.diagnose(Diag);
}
@@ -977,9 +977,9 @@
const SIProgramInfo &CurrentProgramInfo) {
const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());
+ unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4);
@@ -997,13 +997,13 @@
OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
unsigned Rsrc2Val = 0;
- if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
+ if (STM.isVGPRSpillingEnabled(MF.getFunction())) {
OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
if (TM.getTargetTriple().getOS() == Triple::AMDPAL)
Rsrc2Val = S_00B84C_SCRATCH_EN(CurrentProgramInfo.ScratchBlocks > 0);
}
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4);
OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
@@ -1036,13 +1036,13 @@
// we can use the same fixed value that .AMDGPU.config has for Mesa. Note
// that we use a register number rather than a byte offset, so we need to
// divide by 4.
- unsigned Rsrc1Reg = getRsrcReg(MF.getFunction()->getCallingConv()) / 4;
+ unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4;
unsigned Rsrc2Reg = Rsrc1Reg + 1;
// Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used
// with a constant offset to access any non-register shader-specific PAL
// metadata key.
unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE;
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
case CallingConv::AMDGPU_PS:
ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE;
break;
@@ -1068,7 +1068,7 @@
PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE;
PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU;
PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU;
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1;
PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2;
// ScratchSize is in bytes, 16 aligned.
@@ -1083,7 +1083,7 @@
PALMetadataMap[ScratchSizeKey] |=
alignTo(CurrentProgramInfo.ScratchSize, 16);
}
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
PALMetadataMap[Rsrc2Reg] |=
S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks);
PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable();
diff --git a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 6d6fccb..5a91387 100644
--- a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -43,7 +43,7 @@
MachineFunction &MF = MIRBuilder.getMF();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
LLT PtrType = getLLTForType(*PtrTy, DL);
@@ -64,7 +64,7 @@
Type *ParamTy, unsigned Offset,
unsigned DstReg) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index dd97c5c..4992944 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1069,7 +1069,7 @@
SDValue Callee = CLI.Callee;
SelectionDAG &DAG = CLI.DAG;
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
StringRef FuncName("<unknown>");
@@ -1097,7 +1097,7 @@
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
SDLoc(Op).getDebugLoc());
@@ -1190,7 +1190,7 @@
}
}
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
DiagnosticInfoUnsupported BadInit(
Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
DAG.getContext()->diagnose(BadInit);
diff --git a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
index c15b37f..23fd811 100644
--- a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
@@ -153,7 +153,7 @@
int MCOpcode = TII->pseudoToMCOpcode(Opcode);
if (MCOpcode == -1) {
- LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
"a target-specific version: " + Twine(MI->getOpcode()));
}
@@ -205,7 +205,7 @@
StringRef Err;
if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
- LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
C.emitError("Illegal instruction detected: " + Err);
MI->print(errs());
}
diff --git a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 9fb7f5f..b7c8c12 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -19,7 +19,7 @@
MaxKernArgAlign(0),
LDSSize(0),
ABIArgOffset(0),
- IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction()->getCallingConv())),
+ IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
// FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
// except reserved size is not correctly aligned.
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
index 8454ded..5e4d33a 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
@@ -43,7 +43,7 @@
// Forced to be here by one .inc
const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
const MachineFunction *MF) const {
- CallingConv::ID CC = MF->getFunction()->getCallingConv();
+ CallingConv::ID CC = MF->getFunction().getCallingConv();
switch (CC) {
case CallingConv::C:
case CallingConv::Fast:
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index ca04097..80feaa4 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -468,7 +468,7 @@
}
unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
// Compute maximum number of SGPRs function can use using default/requested
@@ -518,7 +518,7 @@
}
unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
// Compute maximum number of VGPRs function can use using default/requested
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 09ad88f..cf4a691 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -382,7 +382,7 @@
unsigned getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
- return getOccupancyWithLocalMemSize(MFI->getLDSSize(), *MF.getFunction());
+ return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}
bool hasFP16Denormals() const {
@@ -410,7 +410,7 @@
}
bool enableIEEEBit(const MachineFunction &MF) const {
- return AMDGPU::isCompute(MF.getFunction()->getCallingConv());
+ return AMDGPU::isCompute(MF.getFunction().getCallingConv());
}
bool useFlatForGlobal() const {
@@ -482,12 +482,12 @@
}
bool isMesaKernel(const MachineFunction &MF) const {
- return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction()->getCallingConv());
+ return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction().getCallingConv());
}
// Covers VS/PS/CS graphics shaders
bool isMesaGfxShader(const MachineFunction &MF) const {
- return isMesa3DOS() && AMDGPU::isShader(MF.getFunction()->getCallingConv());
+ return isMesa3DOS() && AMDGPU::isShader(MF.getFunction().getCallingConv());
}
bool isAmdCodeObjectV2(const MachineFunction &MF) const {
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index 223fdf7..0a0e431 100644
--- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -1641,7 +1641,7 @@
FuncRep->push_back(DummyExitBlk); //insert to function
SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
- LLVMContext &Ctx = LoopHeader->getParent()->getFunction()->getContext();
+ LLVMContext &Ctx = LoopHeader->getParent()->getFunction().getContext();
Ctx.emitError("Extra register needed to handle CFG");
return nullptr;
}
diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 178d993..a0e4f7f 100644
--- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -566,7 +566,7 @@
bool TryMaximizeOccupancy) {
const auto &ST = MF.getSubtarget<SISubtarget>();
auto TgtOcc = std::min(ST.getOccupancyWithLocalMemSize(MF),
- ST.getWavesPerEU(*MF.getFunction()).second);
+ ST.getWavesPerEU(MF.getFunction()).second);
sortRegionsByPressure(TgtOcc);
auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 0e80e93..d414b89 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -37,7 +37,7 @@
ST.getOccupancyWithNumVGPRs(VGPRs));
return std::min(MinRegOccupancy,
ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
- *MF.getFunction()));
+ MF.getFunction()));
}
void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
@@ -315,7 +315,7 @@
ST(MF.getSubtarget<SISubtarget>()),
MFI(*MF.getInfo<SIMachineFunctionInfo>()),
StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(),
- *MF.getFunction())),
+ MF.getFunction())),
MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {
DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
diff --git a/lib/Target/AMDGPU/R600ClauseMergePass.cpp b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
index 8db66e6..5e1ba6b 100644
--- a/lib/Target/AMDGPU/R600ClauseMergePass.cpp
+++ b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
@@ -180,7 +180,7 @@
}
bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index be6a45d..0e788df 100644
--- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -512,14 +512,14 @@
R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- CFStack CFStack(ST, MF.getFunction()->getCallingConv());
+ CFStack CFStack(ST, MF.getFunction().getCallingConv());
for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
++MB) {
MachineBasicBlock &MBB = *MB;
unsigned CfCount = 0;
std::vector<std::pair<unsigned, std::set<MachineInstr *>>> LoopStack;
std::vector<MachineInstr * > IfThenElseStack;
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_VS) {
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
getHWInstrDesc(CF_CALL_FS));
CfCount++;
diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
index 21945c4..23e646c 100644
--- a/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -197,7 +197,7 @@
bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
- return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
usesVertexCache(MI.getOpcode());
}
@@ -207,7 +207,7 @@
bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
- return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
usesVertexCache(MI.getOpcode())) ||
usesTextureCache(MI.getOpcode());
}
diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 95bc7ca..4a14d95 100644
--- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -336,7 +336,7 @@
}
bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
const R600Subtarget &ST = Fn.getSubtarget<R600Subtarget>();
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0766eba..7831819 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -926,7 +926,7 @@
}
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp
index 08a7419..89bb98d 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -394,7 +394,7 @@
// We now have the GIT ptr - now get the scratch descriptor from the entry
// at offset 0.
PointerType *PtrTy =
- PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
+ PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
@@ -425,7 +425,7 @@
if (MFI->hasImplicitBufferPtr()) {
unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
BuildMI(MBB, I, DL, Mov64, Rsrc01)
@@ -435,7 +435,7 @@
const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
PointerType *PtrTy =
- PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
+ PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
auto MMO = MF.getMachineMemOperand(PtrInfo,
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index d3e2e11..50ee88f 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1460,14 +1460,14 @@
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
- FunctionType *FType = MF.getFunction()->getFunctionType();
+ FunctionType *FType = MF.getFunction().getFunctionType();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
- const Function *Fn = MF.getFunction();
+ const Function &Fn = MF.getFunction();
DiagnosticInfoUnsupported NoGraphicsHSA(
- *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
+ Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
DAG.getContext()->diagnose(NoGraphicsHSA);
return DAG.getEntryNode();
}
@@ -1696,7 +1696,7 @@
auto &ArgUsageInfo =
DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
- ArgUsageInfo.setFuncArgInfo(*MF.getFunction(), Info->getArgInfo());
+ ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());
unsigned StackArgSize = CCInfo.getNextStackOffset();
Info->setBytesInStackArgArea(StackArgSize);
@@ -2032,8 +2032,8 @@
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
@@ -2054,7 +2054,7 @@
if (IsVarArg)
return false;
- for (const Argument &Arg : CallerF->args()) {
+ for (const Argument &Arg : CallerF.args()) {
if (Arg.hasByValAttr())
return false;
}
@@ -3594,11 +3594,11 @@
case SISubtarget::TrapIDLLVMTrap:
return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
case SISubtarget::TrapIDLLVMDebugTrap: {
- DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
+ DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
"debugtrap handler not supported",
Op.getDebugLoc(),
DS_Warning);
- LLVMContext &Ctx = MF.getFunction()->getContext();
+ LLVMContext &Ctx = MF.getFunction().getContext();
Ctx.diagnose(NoTrap);
return Chain;
}
@@ -3711,7 +3711,7 @@
const MachineFunction &MF = DAG.getMachineFunction();
DiagnosticInfoUnsupported InvalidAddrSpaceCast(
- *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
+ MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
DAG.getContext()->diagnose(InvalidAddrSpaceCast);
return DAG.getUNDEF(ASC->getValueType(0));
@@ -3913,7 +3913,7 @@
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
- DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"non-hsa intrinsic with hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
@@ -3922,7 +3922,7 @@
static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
- DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
@@ -3951,7 +3951,7 @@
case Intrinsic::amdgcn_queue_ptr: {
if (!Subtarget->isAmdCodeObjectV2(MF)) {
DiagnosticInfoUnsupported BadIntrin(
- *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
+ MF.getFunction(), "unsupported hsa intrinsic without hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
@@ -4129,7 +4129,7 @@
return SDValue();
DiagnosticInfoUnsupported BadIntrin(
- *MF.getFunction(), "intrinsic not supported on subtarget",
+ MF.getFunction(), "intrinsic not supported on subtarget",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
@@ -4559,7 +4559,7 @@
case Intrinsic::amdgcn_s_barrier: {
if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
+ unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
if (WGSize <= ST.getWavefrontSize())
return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
Op.getOperand(0)), 0);
diff --git a/lib/Target/AMDGPU/SIInsertSkips.cpp b/lib/Target/AMDGPU/SIInsertSkips.cpp
index 1b8c9f2..a2f844d 100644
--- a/lib/Target/AMDGPU/SIInsertSkips.cpp
+++ b/lib/Target/AMDGPU/SIInsertSkips.cpp
@@ -166,7 +166,7 @@
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction *MF = MBB.getParent();
- if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
+ if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
!shouldSkip(MBB, MBB.getParent()->back()))
return false;
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 6ec5667..6196760 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -375,7 +375,7 @@
if (!Base1 || !Base2)
return false;
const MachineFunction &MF = *MI1.getParent()->getParent();
- const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
+ const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
Base1 = GetUnderlyingObject(Base1, DL);
Base2 = GetUnderlyingObject(Base1, DL);
@@ -442,10 +442,10 @@
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) {
MachineFunction *MF = MBB.getParent();
- DiagnosticInfoUnsupported IllegalCopy(*MF->getFunction(),
+ DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
"illegal SGPR to VGPR copy",
DL, DS_Error);
- LLVMContext &C = MF->getFunction()->getContext();
+ LLVMContext &C = MF->getFunction().getContext();
C.diagnose(IllegalCopy);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
@@ -873,8 +873,8 @@
return;
}
- if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
+ LLVMContext &Ctx = MF->getFunction().getContext();
Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
" spill register");
BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
@@ -975,8 +975,8 @@
return;
}
- if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
+ LLVMContext &Ctx = MF->getFunction().getContext();
Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
" restore register");
BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
@@ -1017,7 +1017,7 @@
if (TIDReg == AMDGPU::NoRegister)
return TIDReg;
- if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
+ if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
WorkGroupSize > WavefrontSize) {
unsigned TIDIGXReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
@@ -3444,7 +3444,7 @@
// scratch memory access. In both cases, the legalization never involves
// conversion to the addr64 form.
if (isMIMG(MI) ||
- (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
+ (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
(isMUBUF(MI) || isMTBUF(MI)))) {
MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index d9fdb81..84cd47a 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -913,7 +913,7 @@
}
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
STM = &MF.getSubtarget<SISubtarget>();
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 0a92cd1..6013ebc 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -51,9 +51,9 @@
ImplicitArgPtr(false),
GITPtrHigh(0xffffffff) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- const Function *F = MF.getFunction();
- FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(*F);
- WavesPerEU = ST.getWavesPerEU(*F);
+ const Function &F = MF.getFunction();
+ FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
+ WavesPerEU = ST.getWavesPerEU(F);
if (!isEntryFunction()) {
// Non-entry functions have no special inputs for now, other registers
@@ -68,21 +68,21 @@
ArgInfo.PrivateSegmentWaveByteOffset =
ArgDescriptor::createRegister(ScratchWaveOffsetReg);
- if (F->hasFnAttribute("amdgpu-implicitarg-ptr"))
+ if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
ImplicitArgPtr = true;
} else {
- if (F->hasFnAttribute("amdgpu-implicitarg-ptr"))
+ if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
KernargSegmentPtr = true;
}
- CallingConv::ID CC = F->getCallingConv();
+ CallingConv::ID CC = F.getCallingConv();
if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
- if (!F->arg_empty())
+ if (!F.arg_empty())
KernargSegmentPtr = true;
WorkGroupIDX = true;
WorkItemIDX = true;
} else if (CC == CallingConv::AMDGPU_PS) {
- PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);
+ PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
}
if (ST.debuggerEmitPrologue()) {
@@ -94,27 +94,27 @@
WorkItemIDY = true;
WorkItemIDZ = true;
} else {
- if (F->hasFnAttribute("amdgpu-work-group-id-x"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-x"))
WorkGroupIDX = true;
- if (F->hasFnAttribute("amdgpu-work-group-id-y"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-y"))
WorkGroupIDY = true;
- if (F->hasFnAttribute("amdgpu-work-group-id-z"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-z"))
WorkGroupIDZ = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-x"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-x"))
WorkItemIDX = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-y"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-y"))
WorkItemIDY = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-z"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-z"))
WorkItemIDZ = true;
}
const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
- bool MaySpill = ST.isVGPRSpillingEnabled(*F);
+ bool MaySpill = ST.isVGPRSpillingEnabled(F);
bool HasStackObjects = FrameInfo.hasStackObjects();
if (isEntryFunction()) {
@@ -139,30 +139,30 @@
if (HasStackObjects || MaySpill)
PrivateSegmentBuffer = true;
- if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
+ if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
DispatchPtr = true;
- if (F->hasFnAttribute("amdgpu-queue-ptr"))
+ if (F.hasFnAttribute("amdgpu-queue-ptr"))
QueuePtr = true;
- if (F->hasFnAttribute("amdgpu-dispatch-id"))
+ if (F.hasFnAttribute("amdgpu-dispatch-id"))
DispatchID = true;
} else if (ST.isMesaGfxShader(MF)) {
if (HasStackObjects || MaySpill)
ImplicitBufferPtr = true;
}
- if (F->hasFnAttribute("amdgpu-kernarg-segment-ptr"))
+ if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
KernargSegmentPtr = true;
if (ST.hasFlatAddressSpace() && isEntryFunction() && IsCOV2) {
// TODO: This could be refined a lot. The attribute is a poor way of
// detecting calls that may require it before argument lowering.
- if (HasStackObjects || F->hasFnAttribute("amdgpu-flat-scratch"))
+ if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
FlatScratchInit = true;
}
- Attribute A = F->getFnAttribute("amdgpu-git-ptr-high");
+ Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
StringRef S = A.getValueAsString();
if (!S.empty())
S.consumeInteger(0, GITPtrHigh);
diff --git a/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index c66aed9..c73fb10 100644
--- a/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -340,9 +340,9 @@
/* static */
void SIMemOpInfo::reportUnknownSyncScope(
const MachineBasicBlock::iterator &MI) {
- DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
+ DiagnosticInfoUnsupported Diag(MI->getParent()->getParent()->getFunction(),
"Unsupported synchronization scope");
- LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext *CTX = &MI->getParent()->getParent()->getFunction().getContext();
CTX->diagnose(Diag);
}
diff --git a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index aa95161..2dc6f27 100644
--- a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -205,7 +205,7 @@
}
bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
diff --git a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index 5533ba1..8307477 100644
--- a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -103,7 +103,7 @@
}
bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
diff --git a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 7b4652e..5ed7fdf 100644
--- a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1050,7 +1050,7 @@
bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- if (!ST.hasSDWA() || skipFunction(*MF.getFunction()))
+ if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 1b813a3..65cdc13 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1514,7 +1514,7 @@
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
- *MF.getFunction());
+ MF.getFunction());
switch (RC->getID()) {
default:
return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 874fbad..41f989a 100644
--- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -286,7 +286,7 @@
}
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
MachineRegisterInfo &MRI = MF.getRegInfo();
diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 23464e0..53aefe8 100644
--- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -307,7 +307,7 @@
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
std::vector<WorkItem> &Worklist) {
char GlobalFlags = 0;
- bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs");
+ bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
SmallVector<MachineInstr *, 4> SetInactiveInstrs;
// We need to visit the basic blocks in reverse post-order so that we visit
@@ -842,7 +842,7 @@
Blocks.clear();
LiveMaskQueries.clear();
LowerToCopyInstrs.clear();
- CallingConv = MF.getFunction()->getCallingConv();
+ CallingConv = MF.getFunction().getCallingConv();
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
diff --git a/lib/Target/ARC/ARCBranchFinalize.cpp b/lib/Target/ARC/ARCBranchFinalize.cpp
index e5b0f8f..9341e7b 100644
--- a/lib/Target/ARC/ARCBranchFinalize.cpp
+++ b/lib/Target/ARC/ARCBranchFinalize.cpp
@@ -142,7 +142,7 @@
bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Running ARC Branch Finalize on "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
std::vector<MachineInstr *> Branches;
bool Changed = false;
unsigned MaxSize = 0;
@@ -172,7 +172,7 @@
isInt<9>(MaxSize) ? replaceWithBRcc(P.first) : replaceWithCmpBcc(P.first);
}
- DEBUG(dbgs() << "Estimated function size for " << MF.getFunction()->getName()
+ DEBUG(dbgs() << "Estimated function size for " << MF.getName()
<< ": " << MaxSize << "\n");
return Changed;
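Where a call site only needed the symbol name, the patch goes a step further and calls MachineFunction::getName(), which already forwards to the IR function's name, instead of mechanically rewriting the dereference. Both forms below compile after this change; the shorter one is what the ARC hunks adopt:

    dbgs() << MF.getName();               // preferred shortcut
    dbgs() << MF.getFunction().getName(); // equivalent, via the IR function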
diff --git a/lib/Target/ARC/ARCFrameLowering.cpp b/lib/Target/ARC/ARCFrameLowering.cpp
index 2976798..195a781 100644
--- a/lib/Target/ARC/ARCFrameLowering.cpp
+++ b/lib/Target/ARC/ARCFrameLowering.cpp
@@ -88,7 +88,7 @@
void ARCFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
- DEBUG(dbgs() << "Determine Callee Saves: " << MF.getFunction()->getName()
+ DEBUG(dbgs() << "Determine Callee Saves: " << MF.getName()
<< "\n");
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
SavedRegs.set(ARC::BLINK);
@@ -115,7 +115,7 @@
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Prologue: " << MF.getFunction()->getName() << "\n");
+ DEBUG(dbgs() << "Emit Prologue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
MachineModuleInfo &MMI = MF.getMMI();
MCContext &Context = MMI.getContext();
@@ -131,7 +131,7 @@
unsigned StackSlotsUsedByFunclet = 0;
bool SavedBlink = false;
unsigned AlreadyAdjusted = 0;
- if (MF.getFunction()->isVarArg()) {
+ if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
@@ -235,7 +235,7 @@
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Epilogue: " << MF.getFunction()->getName() << "\n");
+ DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
@@ -302,7 +302,7 @@
}
// Relieve the varargs area if necessary.
- if (MF.getFunction()->isVarArg()) {
+ if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
@@ -383,7 +383,7 @@
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
DEBUG(dbgs() << "Spill callee saved registers: "
- << MBB.getParent()->getFunction()->getName() << "\n");
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -400,7 +400,7 @@
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const {
DEBUG(dbgs() << "Restore callee saved registers: "
- << MBB.getParent()->getFunction()->getName() << "\n");
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -415,7 +415,7 @@
MachineFunction &MF, RegScavenger *RS) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
DEBUG(dbgs() << "Process function before frame finalized: "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
MachineFrameInfo &MFI = MF.getFrameInfo();
DEBUG(dbgs() << "Current stack size: " << MFI.getStackSize() << "\n");
const TargetRegisterClass *RC = &ARC::GPR32RegClass;
@@ -440,8 +440,7 @@
MachineBasicBlock::iterator ARCFrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getFunction()->getName()
- << "\n");
+ DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getName() << "\n");
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineInstr &Old = *I;
DebugLoc dl = Old.getDebugLoc();
diff --git a/lib/Target/ARC/ARCRegisterInfo.cpp b/lib/Target/ARC/ARCRegisterInfo.cpp
index 59b22c5..cb9f89d 100644
--- a/lib/Target/ARC/ARCRegisterInfo.cpp
+++ b/lib/Target/ARC/ARCRegisterInfo.cpp
@@ -125,8 +125,7 @@
ARCRegisterInfo::ARCRegisterInfo() : ARCGenRegisterInfo(ARC::BLINK) {}
bool ARCRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
- return MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry();
}
const MCPhysReg *
diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp
index 34e41ba..16d5f74 100644
--- a/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/lib/Target/ARM/A15SDOptimizer.cpp
@@ -655,7 +655,7 @@
}
bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 1779c89..d3d79fe 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -109,7 +109,7 @@
Subtarget = &MF.getSubtarget<ARMSubtarget>();
SetupMachineFunction(MF);
- const Function* F = MF.getFunction();
+ const Function &F = MF.getFunction();
const TargetMachine& TM = MF.getTarget();
// Collect all globals that had their storage promoted to a constant pool.
@@ -120,13 +120,13 @@
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
- if (F->hasFnAttribute(Attribute::OptimizeNone))
+ if (F.hasFnAttribute(Attribute::OptimizeNone))
// For best debugging illusion, speed and small size sacrificed
OptimizationGoal = 6;
- else if (F->optForMinSize())
+ else if (F.optForMinSize())
// Aggressively for small size, speed and debug illusion sacrificed
OptimizationGoal = 4;
- else if (F->optForSize())
+ else if (F.optForSize())
// For small size, but speed and debugging illusion preserved
OptimizationGoal = 3;
else if (TM.getOptLevel() == CodeGenOpt::Aggressive)
@@ -146,7 +146,7 @@
OptimizationGoals = 0;
if (Subtarget->isTargetCOFF()) {
- bool Internal = F->hasInternalLinkage();
+ bool Internal = F.hasInternalLinkage();
COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
: COFF::IMAGE_SYM_CLASS_EXTERNAL;
int Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
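Passes that consult the IR function several times bind a reference once and use member syntax throughout. A condensed, hypothetical helper distilling the OptimizationGoal logic from the ARMAsmPrinter hunk above (the name optGoal and the fall-through value 0 are illustrative, not from the tree):

    static unsigned optGoal(const MachineFunction &MF) {
      const Function &F = MF.getFunction(); // a reference: never null
      if (F.hasFnAttribute(Attribute::OptimizeNone))
        return 6; // best debugging illusion
      if (F.optForMinSize())
        return 4; // aggressively small
      return F.optForSize() ? 3 : 0;
    }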
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index a92a916..8c17277 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1512,18 +1512,18 @@
4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
else if (ACPV->isExtSymbol())
NewCPV = ARMConstantPoolSymbol::
- Create(MF.getFunction()->getContext(),
+ Create(MF.getFunction().getContext(),
cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
else if (ACPV->isBlockAddress())
NewCPV = ARMConstantPoolConstant::
Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
ARMCP::CPBlockAddress, 4);
else if (ACPV->isLSDA())
- NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
+ NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
ARMCP::CPLSDA, 4);
else if (ACPV->isMachineBasicBlock())
NewCPV = ARMConstantPoolMBB::
- Create(MF.getFunction()->getContext(),
+ Create(MF.getFunction().getContext(),
cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
else
llvm_unreachable("Unexpected ARM constantpool value type!!");
@@ -1843,7 +1843,7 @@
// If we are optimizing for size, see if the branch in the predecessor can be
// lowered to cbn?z by the constant island lowering pass, and return false if
// so. This results in a shorter instruction sequence.
- if (MBB.getParent()->getFunction()->optForSize()) {
+ if (MBB.getParent()->getFunction().optForSize()) {
MachineBasicBlock *Pred = *MBB.pred_begin();
if (!Pred->empty()) {
MachineInstr *LastMI = &*Pred->rbegin();
@@ -2210,7 +2210,7 @@
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations; it's only really a great benefit to code-size.
- if (!MF.getFunction()->optForMinSize())
+ if (!MF.getFunction().optForMinSize())
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -3982,7 +3982,7 @@
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI.getParent()->getParent();
// FIXME: Use Function::optForSize().
- if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
+ if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
}
return Latency;
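A few interfaces, like ARMConstantPoolConstant::Create in the hunk above, still accept a const Function pointer; such call sites now take the address of the returned reference rather than forwarding a pointer:

    // Before this patch the pointer was passed straight through:
    //   Create(MF.getFunction(), PCLabelId, ARMCP::CPLSDA, 4);
    NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);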
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 0aec874..4b9a437 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -71,17 +71,17 @@
? CSR_iOS_SaveList
: (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);
- const Function *F = MF->getFunction();
- if (F->getCallingConv() == CallingConv::GHC) {
+ const Function &F = MF->getFunction();
+ if (F.getCallingConv() == CallingConv::GHC) {
// GHC set of callee saved regs is empty as all those regs are
// used for passing STG regs around
return CSR_NoRegs_SaveList;
- } else if (F->hasFnAttribute("interrupt")) {
+ } else if (F.hasFnAttribute("interrupt")) {
if (STI.isMClass()) {
// M-class CPUs have hardware which saves the registers needed to allow a
// function conforming to the AAPCS to function as a handler.
return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
- } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
+ } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
// Fast interrupt mode gives the handler a private copy of R8-R14, so less
// need to be saved to restore user-mode state.
return CSR_FIQ_SaveList;
@@ -93,7 +93,7 @@
}
if (STI.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
if (STI.isTargetDarwin())
return CSR_iOS_SwiftError_SaveList;
@@ -101,7 +101,7 @@
CSR_AAPCS_SwiftError_SaveList;
}
- if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
+ if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
? CSR_iOS_CXX_TLS_PE_SaveList
: CSR_iOS_CXX_TLS_SaveList;
@@ -111,7 +111,7 @@
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
return nullptr;
@@ -126,7 +126,7 @@
return CSR_NoRegs_RegMask;
if (STI.getTargetLowering()->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
: CSR_AAPCS_SwiftError_RegMask;
@@ -440,7 +440,7 @@
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C =
- ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
+ ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
diff --git a/lib/Target/ARM/ARMCallLowering.cpp b/lib/Target/ARM/ARMCallLowering.cpp
index 7338ac8..eab4b3b 100644
--- a/lib/Target/ARM/ARMCallLowering.cpp
+++ b/lib/Target/ARM/ARMCallLowering.cpp
@@ -190,7 +190,7 @@
LLVMContext &Ctx = OrigArg.Ty->getContext();
const DataLayout &DL = MF.getDataLayout();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
SmallVector<EVT, 4> SplitVTs;
SmallVector<uint64_t, 4> Offsets;
@@ -218,7 +218,7 @@
bool NeedsConsecutiveRegisters =
TLI.functionArgumentNeedsConsecutiveRegisters(
- SplitTy, F->getCallingConv(), F->isVarArg());
+ SplitTy, F.getCallingConv(), F.isVarArg());
if (NeedsConsecutiveRegisters) {
Flags.setInConsecutiveRegs();
if (i == e - 1)
@@ -244,7 +244,7 @@
return true;
auto &MF = MIRBuilder.getMF();
- const auto &F = *MF.getFunction();
+ const auto &F = MF.getFunction();
auto DL = MF.getDataLayout();
auto &TLI = *getTLI<ARMTargetLowering>();
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index f1def98..b14b2c6 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1259,7 +1259,7 @@
MachineConstantPool *MCP = MF->getConstantPool();
unsigned PCLabelID = AFI->createPICLabelUId();
MachineConstantPoolValue *CPV =
- ARMConstantPoolSymbol::Create(MF->getFunction()->getContext(),
+ ARMConstantPoolSymbol::Create(MF->getFunction().getContext(),
"__aeabi_read_tp", PCLabelID, 0);
unsigned Reg = MI.getOperand(0).getReg();
MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 1090f62..0ea4350 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -2958,7 +2958,7 @@
unsigned Align, MVT VT) {
bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
- LLVMContext *Context = &MF->getFunction()->getContext();
+ LLVMContext *Context = &MF->getFunction().getContext();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index e9a13b9..4ff864a 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -203,10 +203,10 @@
static bool WindowsRequiresStackProbe(const MachineFunction &MF,
size_t StackSizeInBytes) {
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
unsigned StackProbeSize = (MFI.getStackProtectorIndex() > 0) ? 4080 : 4096;
- if (F->hasFnAttribute("stack-probe-size"))
- F->getFnAttribute("stack-probe-size")
+ if (F.hasFnAttribute("stack-probe-size"))
+ F.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
return StackSizeInBytes >= StackProbeSize;
@@ -370,7 +370,7 @@
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
StackAdjustingInsts DefCFAOffsetCandidates;
@@ -448,7 +448,7 @@
int FramePtrOffsetInPush = 0;
if (HasFP) {
int FPOffset = MFI.getObjectOffset(FramePtrSpillFI);
- assert(getMaxFPOffset(*MF.getFunction(), *AFI) <= FPOffset &&
+ assert(getMaxFPOffset(MF.getFunction(), *AFI) <= FPOffset &&
"Max FP estimation is wrong");
FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize;
AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
@@ -766,7 +766,7 @@
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
// First put ourselves on the first (from top) terminator instructions.
@@ -1533,7 +1533,7 @@
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
@@ -1744,7 +1744,7 @@
EstimatedStackSize += 16; // For possible paddings.
unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this);
- int MaxFPOffset = getMaxFPOffset(*MF.getFunction(), *AFI);
+ int MaxFPOffset = getMaxFPOffset(MF.getFunction(), *AFI);
bool BigFrameOffsets = EstimatedStackSize >= EstimatedRSStackSizeLimit ||
MFI.hasVarSizedObjects() ||
(MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) ||
@@ -2102,7 +2102,7 @@
// Sadly, this currently doesn't support varargs or platforms other than
// android/linux. Note that thumb1/thumb2 are supported for android/linux.
- if (MF.getFunction()->isVarArg())
+ if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!ST->isTargetAndroid() && !ST->isTargetLinux())
report_fatal_error("Segmented stacks not supported on this platform.");
@@ -2250,7 +2250,7 @@
if (Thumb && ST->isThumb1Only()) {
unsigned PCLabelId = ARMFI->createPICLabelUId();
ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create(
- MF.getFunction()->getContext(), "__STACK_LIMIT", PCLabelId, 0);
+ MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0);
MachineConstantPool *MCP = MF.getConstantPool();
unsigned CPI = MCP->getConstantPoolIndex(NewCPV, 4);
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index f60500d..1b4d7ff 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1773,7 +1773,7 @@
bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool isThisReturn = false;
bool isSibCall = false;
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
// Disable tail calls if they're not supported.
if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
@@ -1782,7 +1782,7 @@
if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
- isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
+ isVarArg, isStructRet, MF.getFunction().hasStructRetAttr(),
Outs, OutVals, Ins, DAG);
if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
@@ -1981,7 +1981,7 @@
bool isDirect = false;
const TargetMachine &TM = getTargetMachine();
- const Module *Mod = MF.getFunction()->getParent();
+ const Module *Mod = MF.getFunction().getParent();
const GlobalValue *GV = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
GV = G->getGlobal();
@@ -2033,7 +2033,7 @@
auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
auto *BB = CLI.CS.getParent();
bool PreferIndirect =
- Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
+ Subtarget->isThumb() && MF.getFunction().optForMinSize() &&
count_if(GV->users(), [&BB](const User *U) {
return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
}) > 2;
@@ -2105,7 +2105,7 @@
CallOpc = ARMISD::CALL_NOLINK;
else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
// Emit regular call when code size is the priority
- !MF.getFunction()->optForMinSize())
+ !MF.getFunction().optForMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK;
else
@@ -2280,8 +2280,8 @@
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
assert(Subtarget->supportsTailCall());
@@ -2298,7 +2298,7 @@
// Exception-handling functions need a special set of instructions to indicate
// a return to the hardware. Tail-calling another function would probably
// break this.
- if (CallerF->hasFnAttribute("interrupt"))
+ if (CallerF.hasFnAttribute("interrupt"))
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
@@ -2410,9 +2410,9 @@
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
const SDLoc &DL, SelectionDAG &DAG) {
const MachineFunction &MF = DAG.getMachineFunction();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
- StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
+ StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
// See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
// version of the "preferred return address". These offsets affect the return
@@ -2553,7 +2553,7 @@
//
// M-class CPUs actually use a normal return sequence with a special
// (hardware-provided) value in LR, so the normal code path works.
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
!Subtarget->isMClass()) {
if (Subtarget->isThumb1Only())
report_fatal_error("interrupt attribute is not supported in Thumb1");
@@ -2691,7 +2691,7 @@
auto T = const_cast<Type*>(CP->getType());
auto C = const_cast<Constant*>(CP->getConstVal());
auto M = const_cast<Module*>(DAG.getMachineFunction().
- getFunction()->getParent());
+ getFunction().getParent());
auto GV = new GlobalVariable(
*M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
@@ -2800,7 +2800,7 @@
// trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
// silly).
auto TRI =
- getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
+ getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
@@ -3055,7 +3055,7 @@
// This is a win if the constant is only used in one function (so it doesn't
// need to be duplicated) or duplicating the constant wouldn't increase code
// size (implying the constant is no larger than 4 bytes).
- const Function *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
// We rely on this decision to inline being idempotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
@@ -3113,7 +3113,7 @@
// in multiple functions but is no larger than a pointer. We also check if
// GVar has constant (non-ConstantExpr) users. If so, it essentially has its
// address taken.
- if (!allUsersAreInFunction(GVar, F) &&
+ if (!allUsersAreInFunction(GVar, &F) &&
!(Size <= 4 && allUsersAreInFunctions(GVar)))
return SDValue();
@@ -3322,7 +3322,7 @@
bool IsPositionIndependent = isPositionIndependent();
unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
ARMConstantPoolValue *CPV =
- ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
+ ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
ARMCP::CPLSDA, PCAdj);
CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
@@ -3598,7 +3598,7 @@
SmallVector<SDValue, 16> ArgValues;
SDValue ArgValue;
- Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
// Initially ArgRegsSaveSize is zero.
@@ -7754,9 +7754,9 @@
SDValue InChain = DAG.getEntryNode();
SDValue TCChain = InChain;
- const auto *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
- F->getReturnType() == LCRTy;
+ F.getReturnType() == LCRTy;
if (IsTC)
InChain = TCChain;
@@ -7954,7 +7954,7 @@
MachineRegisterInfo *MRI = &MF->getRegInfo();
MachineConstantPool *MCP = MF->getConstantPool();
ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
bool isThumb = Subtarget->isThumb();
bool isThumb2 = Subtarget->isThumb2();
@@ -7962,7 +7962,7 @@
unsigned PCLabelId = AFI->createPICLabelUId();
unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
ARMConstantPoolValue *CPV =
- ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
+ ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
@@ -8248,7 +8248,7 @@
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
@@ -8349,7 +8349,7 @@
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
@@ -8645,7 +8645,7 @@
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
+ if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16)
UnitSize = 16;
@@ -8751,7 +8751,7 @@
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
// MachineConstantPool wants an explicit alignment.
@@ -12417,11 +12417,11 @@
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// See if we can use NEON instructions for this...
if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
@@ -14364,7 +14364,7 @@
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 5cffed2..0a1af8d 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -692,8 +692,8 @@
SDValue ThisVal) const;
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp
index 6a54199..ddcdb1f 100644
--- a/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -318,7 +318,7 @@
// Our divmod libcalls return a struct containing the quotient and the
// remainder. We need to create a virtual register for it.
- auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
Type *ArgTy = Type::getInt32Ty(Ctx);
StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
auto RetVal = MRI.createGenericVirtualRegister(
@@ -359,7 +359,7 @@
return true;
}
- auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
auto *RetTy = Type::getInt32Ty(Ctx);
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index c61e72e..8b3a2e2 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1273,7 +1273,7 @@
// can still change to a writeback form as that will save us 2 bytes
// of code size. It can create WAW hazards though, so only do it if
// we're minimizing code size.
- if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill)
+ if (!MBB.getParent()->getFunction().optForMinSize() || !BaseKill)
return false;
bool HighRegsUsed = false;
@@ -1953,7 +1953,7 @@
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
MF = &Fn;
@@ -2035,7 +2035,7 @@
ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (AssumeMisalignedLoadStores || skipFunction(*Fn.getFunction()))
+ if (AssumeMisalignedLoadStores || skipFunction(Fn.getFunction()))
return false;
TD = &Fn.getDataLayout();
@@ -2130,9 +2130,9 @@
return false;
unsigned Align = (*Op0->memoperands_begin())->getAlignment();
- const Function *Func = MF->getFunction();
+ const Function &Func = MF->getFunction();
unsigned ReqAlign = STI->hasV6Ops()
- ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
+ ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
: 8; // Pre-v6 need 8-byte align
if (Align < ReqAlign)
return false;
diff --git a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
index 7e4d598..cff4a25 100644
--- a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
+++ b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
@@ -49,7 +49,7 @@
}
bool ARMOptimizeBarriersPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// Vector to store the DMBs we will remove after the first iteration
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 33dcf9b..d4fbf76 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -171,7 +171,7 @@
// Code size optimisation: do not inline memcpy if expansion results in
// more instructions than the library call.
- if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction().optForMinSize()) {
return SDValue();
}
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index b6875e7..4d4a881 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -373,7 +373,7 @@
// For general targets, the prologue can grow when VFPs are allocated with
// stride 4 (more vpush instructions). But WatchOS uses a compact unwind
// format which it's more important to get right.
- return isTargetWatchABI() || (isSwift() && !MF.getFunction()->optForMinSize());
+ return isTargetWatchABI() || (isSwift() && !MF.getFunction().optForMinSize());
}
bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
@@ -381,7 +381,7 @@
// immediates as it is inherently position independent, and may be out of
// range otherwise.
return !NoMovt && hasV8MBaselineOps() &&
- (isTargetWindows() || !MF.getFunction()->optForMinSize() || genExecuteOnly());
+ (isTargetWindows() || !MF.getFunction().optForMinSize() || genExecuteOnly());
}
bool ARMSubtarget::useFastISel() const {
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index 00c41c4..153e7b1 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -371,7 +371,7 @@
}
bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = static_cast<const ARMBaseInstrInfo *>(Fn.getSubtarget().getInstrInfo());
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index a0b98a4..3920c73 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -449,7 +449,7 @@
break;
case ARM::t2LDR_POST:
case ARM::t2STR_POST: {
- if (!MBB.getParent()->getFunction()->optForMinSize())
+ if (!MBB.getParent()->getFunction().optForMinSize())
return false;
if (!MI->hasOneMemOperand() ||
@@ -1084,7 +1084,7 @@
}
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
- if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
+ if (PredicateFtor && !PredicateFtor(MF.getFunction()))
return false;
STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
@@ -1094,8 +1094,8 @@
TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
// Optimizing / minimizing size? Minimizing size implies optimizing for size.
- OptimizeSize = MF.getFunction()->optForSize();
- MinimizeSize = MF.getFunction()->optForMinSize();
+ OptimizeSize = MF.getFunction().optForSize();
+ MinimizeSize = MF.getFunction().optForMinSize();
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
diff --git a/lib/Target/ARM/ThumbRegisterInfo.cpp b/lib/Target/ARM/ThumbRegisterInfo.cpp
index d2bebb9..d190edf 100644
--- a/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -70,7 +70,7 @@
const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
@@ -89,7 +89,7 @@
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
diff --git a/lib/Target/AVR/AVRFrameLowering.cpp b/lib/Target/AVR/AVRFrameLowering.cpp
index 5101cf5..3b73223 100644
--- a/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/lib/Target/AVR/AVRFrameLowering.cpp
@@ -53,7 +53,7 @@
void AVRFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallConv = MF.getFunction().getCallingConv();
DebugLoc DL = (MBBI != MBB.end()) ? MBBI->getDebugLoc() : DebugLoc();
const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
const AVRInstrInfo &TII = *STI.getInstrInfo();
@@ -143,7 +143,7 @@
void AVRFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallConv = MF.getFunction().getCallingConv();
bool isHandler = (CallConv == CallingConv::AVR_INTR ||
CallConv == CallingConv::AVR_SIGNAL);
diff --git a/lib/Target/AVR/AVRISelLowering.cpp b/lib/Target/AVR/AVRISelLowering.cpp
index 69d9748..d9e27e9 100644
--- a/lib/Target/AVR/AVRISelLowering.cpp
+++ b/lib/Target/AVR/AVRISelLowering.cpp
@@ -1039,7 +1039,7 @@
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- analyzeArguments(nullptr, MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
+ analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
false, isVarArg);
SDValue ArgValue;
@@ -1391,7 +1391,7 @@
// Don't emit the ret/reti instruction when the naked attribute is present in
// the function being compiled.
- if (MF.getFunction()->getAttributes().hasAttribute(
+ if (MF.getFunction().getAttributes().hasAttribute(
AttributeList::FunctionIndex, Attribute::Naked)) {
return Chain;
}
diff --git a/lib/Target/AVR/AVRRegisterInfo.cpp b/lib/Target/AVR/AVRRegisterInfo.cpp
index b6ac934..d171a62 100644
--- a/lib/Target/AVR/AVRRegisterInfo.cpp
+++ b/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -34,7 +34,7 @@
const uint16_t *
AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- CallingConv::ID CC = MF->getFunction()->getCallingConv();
+ CallingConv::ID CC = MF->getFunction().getCallingConv();
return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL)
? CSR_Interrupts_SaveList
diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp
index 7d53556..3ea96e3 100644
--- a/lib/Target/BPF/BPFISelLowering.cpp
+++ b/lib/Target/BPF/BPFISelLowering.cpp
@@ -36,7 +36,7 @@
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Msg, DL.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
@@ -48,7 +48,7 @@
Val->print(OS);
OS.flush();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Str, DL.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
}
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
@@ -227,7 +227,7 @@
}
}
- if (IsVarArg || MF.getFunction()->hasStructRetAttr()) {
+ if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
}
@@ -382,7 +382,7 @@
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
- if (MF.getFunction()->getReturnType()->isAggregateType()) {
+ if (MF.getFunction().getReturnType()->isAggregateType()) {
fail(DL, DAG, "only integer returns supported");
return DAG.getNode(Opc, DL, MVT::Other, Chain);
}
diff --git a/lib/Target/BPF/BPFRegisterInfo.cpp b/lib/Target/BPF/BPFRegisterInfo.cpp
index 00d609e..6f70678 100644
--- a/lib/Target/BPF/BPFRegisterInfo.cpp
+++ b/lib/Target/BPF/BPFRegisterInfo.cpp
@@ -45,12 +45,12 @@
static void WarnSize(int Offset, MachineFunction &MF, DebugLoc& DL)
{
if (Offset <= -512) {
- auto F = MF.getFunction();
- DiagnosticInfoUnsupported DiagStackSize(*F,
+ const Function &F = MF.getFunction();
+ DiagnosticInfoUnsupported DiagStackSize(F,
"Looks like the BPF stack limit of 512 bytes is exceeded. "
"Please move large on stack variables into BPF per-cpu array map.\n",
DL);
- F->getContext().diagnose(DiagStackSize);
+ F.getContext().diagnose(DiagStackSize);
}
}
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index f14beaa..9e73766 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -2631,7 +2631,7 @@
}
bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &HST = MF.getSubtarget<HexagonSubtarget>();
@@ -3181,7 +3181,7 @@
}
bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &HST = MF.getSubtarget<HexagonSubtarget>();
diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp
index 8297c47..b6e220b 100644
--- a/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -61,7 +61,7 @@
// passed via registers.
unsigned InVirtReg, InPhysReg = 0;
- for (const Argument &Arg : MF.getFunction()->args()) {
+ for (const Argument &Arg : MF.getFunction().args()) {
Type *ATy = Arg.getType();
unsigned Width = 0;
if (ATy->isIntegerTy())
diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index 6e43574..a22ac8c 100644
--- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -114,7 +114,7 @@
}
bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
// Loop over all of the basic blocks.
diff --git a/lib/Target/Hexagon/HexagonConstExtenders.cpp b/lib/Target/Hexagon/HexagonConstExtenders.cpp
index 1e55c4b..294a6da 100644
--- a/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1831,7 +1831,7 @@
}
bool HCE::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index c59cc50..8ac96f3 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -280,7 +280,7 @@
public:
MachineConstEvaluator(MachineFunction &Fn)
: TRI(*Fn.getSubtarget().getRegisterInfo()),
- MF(Fn), CX(Fn.getFunction()->getContext()) {}
+ MF(Fn), CX(Fn.getFunction().getContext()) {}
virtual ~MachineConstEvaluator() = default;
// The required interface:
@@ -1890,10 +1890,8 @@
}
bool runOnMachineFunction(MachineFunction &MF) override {
- const Function *F = MF.getFunction();
- if (!F)
- return false;
- if (skipFunction(*F))
+ const Function &F = MF.getFunction();
+ if (skipFunction(F))
return false;
HexagonConstEvaluator HCE(MF);
@@ -2925,7 +2923,7 @@
DEBUG({
if (!NewInstrs.empty()) {
MachineFunction &MF = *MI.getParent()->getParent();
- dbgs() << "In function: " << MF.getFunction()->getName() << "\n";
+ dbgs() << "In function: " << MF.getName() << "\n";
dbgs() << "Rewrite: for " << MI << " created " << *NewInstrs[0];
for (unsigned i = 1; i < NewInstrs.size(); ++i)
dbgs() << " " << *NewInstrs[i];
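The runOnMachineFunction hunk above is the one spot where the new contract visibly simplifies control flow: with a reference return there is no pointer to test, so the defensive early-out was dead code. The resulting shape, sketched under the usual MachineFunctionPass assumptions:

    bool runOnMachineFunction(MachineFunction &MF) override {
      const Function &F = MF.getFunction(); // cannot be null by contract
      if (skipFunction(F))
        return false;
      // ... rest of the pass ...
    }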
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index d8135e9..087a772 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -459,7 +459,7 @@
}
bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (IsCombinesDisabled) return false;
@@ -471,8 +471,8 @@
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
- const Function *F = MF.getFunction();
- bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
+ const Function &F = MF.getFunction();
+ bool OptForSize = F.hasFnAttribute(Attribute::OptimizeForSize);
// Combine aggressively (for code size)
ShouldCombineAggressively =
diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 93ad2e7..0f1b9a4 100644
--- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -1047,7 +1047,7 @@
}
bool HexagonEarlyIfConversion::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &ST = MF.getSubtarget<HexagonSubtarget>();
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 458a48e..c2feaf5 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -1243,7 +1243,7 @@
}
bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo());
@@ -1253,7 +1253,7 @@
MRI = &MF.getRegInfo();
DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
bool Changed = false;
std::set<unsigned> CoalUpd, PredUpd;
@@ -1281,7 +1281,7 @@
KillUpd.insert(Op.getReg());
updateLiveness(KillUpd, false, true, false);
DEBUG(LIS->print(dbgs() << "After coalescing\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
// First, simply split all muxes into a pair of conditional transfers
// and update the live intervals to reflect the new arrangement. The
@@ -1298,7 +1298,7 @@
// (because of predicated defs), so make sure they are left untouched.
// Predication does not use live intervals.
DEBUG(LIS->print(dbgs() << "After splitting\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
// Traverse all blocks and collapse predicable instructions feeding
// conditional transfers into predicated instructions.
@@ -1307,7 +1307,7 @@
for (auto &B : MF)
Changed |= predicateInBlock(B, PredUpd);
DEBUG(LIS->print(dbgs() << "After predicating\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
PredUpd.insert(CoalUpd.begin(), CoalUpd.end());
updateLiveness(PredUpd, true, true, true);
@@ -1315,7 +1315,7 @@
DEBUG({
if (Changed)
LIS->print(dbgs() << "After expand-condsets\n",
- MF.getFunction()->getParent());
+ MF.getFunction().getParent());
});
return Changed;
diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index 6336075..a842b67 100644
--- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -89,7 +89,7 @@
}
bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
return fixupLoopInstrs(MF);
}
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 3d8d561..65a2fc3 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -225,7 +225,7 @@
bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
bool NeedCFI = MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ MF.getFunction().needsUnwindTableEntry();
if (!NeedCFI)
return false;
@@ -375,17 +375,17 @@
}
static inline bool isOptNone(const MachineFunction &MF) {
- return MF.getFunction()->hasFnAttribute(Attribute::OptimizeNone) ||
+ return MF.getFunction().hasFnAttribute(Attribute::OptimizeNone) ||
MF.getTarget().getOptLevel() == CodeGenOpt::None;
}
static inline bool isOptSize(const MachineFunction &MF) {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
return F.optForSize() && !F.optForMinSize();
}
static inline bool isMinSize(const MachineFunction &MF) {
- return MF.getFunction()->optForMinSize();
+ return MF.getFunction().optForMinSize();
}
/// Implements shrink-wrapping of the stack frame. By default, stack frame
@@ -960,7 +960,7 @@
}
bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return false;
auto &MFI = MF.getFrameInfo();
@@ -1396,8 +1396,7 @@
bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
- DEBUG(dbgs() << __func__ << " on "
- << MF.getFunction()->getName() << '\n');
+ DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
MachineFrameInfo &MFI = MF.getFrameInfo();
BitVector SRegs(Hexagon::NUM_TARGET_REGS);
diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp
index 99f3a2e..c1841d7 100644
--- a/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -1482,7 +1482,7 @@
}
bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail;
diff --git a/lib/Target/Hexagon/HexagonGenMux.cpp b/lib/Target/Hexagon/HexagonGenMux.cpp
index dc1cdc8..5a001d6 100644
--- a/lib/Target/Hexagon/HexagonGenMux.cpp
+++ b/lib/Target/Hexagon/HexagonGenMux.cpp
@@ -368,7 +368,7 @@
}
bool HexagonGenMux::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 4eb24e0..9288ed0 100644
--- a/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -492,7 +492,7 @@
}
bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index d814fa7..715fd52 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -377,7 +377,7 @@
bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 1101b23..a6ac4e3 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1746,10 +1746,10 @@
return GAUsesInFunction[V];
unsigned Result = 0;
- const Function *CurF = CurDAG->getMachineFunction().getFunction();
+ const Function &CurF = CurDAG->getMachineFunction().getFunction();
for (const User *U : V->users()) {
if (isa<Instruction>(U) &&
- cast<Instruction>(U)->getParent()->getParent() == CurF)
+ cast<Instruction>(U)->getParent()->getParent() == &CurF)
++Result;
}
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 718e09a..6ae5270 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -717,12 +717,12 @@
else
CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
if (Attr.getValueAsString() == "true")
IsTailCall = false;
if (IsTailCall) {
- bool StructAttrFlag = MF.getFunction()->hasStructRetAttr();
+ bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
IsVarArg, IsStructRet,
StructAttrFlag,
@@ -3006,8 +3006,8 @@
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- const Function *CallerF = DAG.getMachineFunction().getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
// ***************************************************************************
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 8765fc9..b1c549a 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -188,7 +188,7 @@
void VLIWMachineScheduler::schedule() {
DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
<< printMBBReference(*BB) << " " << BB->getName() << " in_func "
- << BB->getParent()->getFunction()->getName() << " at loop depth "
+ << BB->getParent()->getName() << " at loop depth "
<< MLI->getLoopDepth(BB) << " \n");
buildDAGWithRegPressure();
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 99c16f1..ffa447c 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -434,7 +434,7 @@
DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
<< "********** Function: " << MF.getName() << "\n");
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we move NewValueJump before register allocation we'll need live variable
diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index d97ed48..4738a4d 100644
--- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -595,7 +595,7 @@
}
bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 581761c..3c588a8 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -108,7 +108,7 @@
false, false)
bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
diff --git a/lib/Target/Hexagon/HexagonRDFOpt.cpp b/lib/Target/Hexagon/HexagonRDFOpt.cpp
index c73a230..413bc8e 100644
--- a/lib/Target/Hexagon/HexagonRDFOpt.cpp
+++ b/lib/Target/Hexagon/HexagonRDFOpt.cpp
@@ -280,7 +280,7 @@
}
bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (RDFLimit.getPosition()) {
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 68b5ddd..c9f5400 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -1163,7 +1163,7 @@
DEBUG(dbgs() << "Splitting double registers in function: "
<< MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &ST = MF.getSubtarget<HexagonSubtarget>();
diff --git a/lib/Target/Hexagon/HexagonStoreWidening.cpp b/lib/Target/Hexagon/HexagonStoreWidening.cpp
index fb3e6a0..300f6de 100644
--- a/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -585,7 +585,7 @@
}
bool HexagonStoreWidening::runOnMachineFunction(MachineFunction &MFn) {
- if (skipFunction(*MFn.getFunction()))
+ if (skipFunction(MFn.getFunction()))
return false;
MF = &MFn;
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index e745447..c240423 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -199,7 +199,7 @@
}
bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) {
- if (DisablePacketizer || skipFunction(*MF.getFunction()))
+ if (DisablePacketizer || skipFunction(MF.getFunction()))
return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp
index 8513ebd..d1f6e5a 100644
--- a/lib/Target/Hexagon/RDFGraph.cpp
+++ b/lib/Target/Hexagon/RDFGraph.cpp
@@ -766,7 +766,7 @@
RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
RegisterSet LR;
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
: nullptr;
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp
index 7e4fd24..1756743 100644
--- a/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -513,7 +513,7 @@
// The Lanai ABI for returning structs by value requires that we copy
// the sret argument into rv for the return. Save the argument into
// a virtual register so that we can access it from the return points.
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
unsigned Reg = LanaiMFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
@@ -568,7 +568,7 @@
// the sret argument into rv for the return. We saved the argument into
// a virtual register in the entry block, so now we copy the value out
// and into rv.
- if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
+ if (DAG.getMachineFunction().getFunction().hasStructRetAttr()) {
MachineFunction &MF = DAG.getMachineFunction();
LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
unsigned Reg = LanaiMFI->getSRetReturnReg();
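
The Lanai, MSP430, Mips, and SystemZ hunks that follow are almost all this one pattern: a chained attribute query where only the member-access operator changes. A stand-in sketch of the entire edit:

  #include <iostream>

  struct Function {
    bool HasSRet = true;
    bool hasStructRetAttr() const { return HasSRet; }
  };

  struct MachineFunction {
    Function F;
    const Function &getFunction() const { return F; }
  };

  int main() {
    MachineFunction MF;
    // Before: MF.getFunction()->hasStructRetAttr()
    // After: '.' instead of '->' -- nothing else moves.
    if (MF.getFunction().hasStructRetAttr())
      std::cout << "copy the sret pointer into the return register\n";
  }
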
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 7cfcb96..f5b2bda 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -746,7 +746,7 @@
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 7a3b7a8..54e53e1 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -38,7 +38,7 @@
const MCPhysReg*
MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MSP430FrameLowering *TFI = getFrameLowering(*MF);
- const Function* F = MF->getFunction();
+ const Function* F = &MF->getFunction();
static const MCPhysReg CalleeSavedRegs[] = {
MSP430::FP, MSP430::R5, MSP430::R6, MSP430::R7,
MSP430::R8, MSP430::R9, MSP430::R10,
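
MSP430RegisterInfo shows the other accommodation: when the surrounding body still wants a pointer-typed local (presumably because later code in the function keeps using F as a pointer), the minimal edit is an address-of in the initializer rather than a rewrite of every downstream use. Sketch with stand-in types:

  struct Function { int ID = 0; };

  struct MachineFunction {
    Function F;
    const Function &getFunction() const { return F; }
  };

  // A downstream helper that still traffics in pointers.
  bool isSpecial(const Function *F) { return F && F->ID == 42; }

  int main() {
    MachineFunction MF;
    // Keep the pointer local; just take the address of the reference.
    const Function *F = &MF.getFunction();
    return isSpecial(F) ? 1 : 0;
  }
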
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index fbf7b5e..f9de78d 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -381,7 +381,7 @@
MCInstLowering.Initialize(&MF->getContext());
- bool IsNakedFunction = MF->getFunction()->hasFnAttribute(Attribute::Naked);
+ bool IsNakedFunction = MF->getFunction().hasFnAttribute(Attribute::Naked);
if (!IsNakedFunction)
emitFrameDirective();
diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp
index 6a03ee9..81a1cce 100644
--- a/lib/Target/Mips/MipsCCState.cpp
+++ b/lib/Target/Mips/MipsCCState.cpp
@@ -101,9 +101,9 @@
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Outs.size(); ++i) {
OriginalArgWasF128.push_back(
- originalTypeIsF128(MF.getFunction()->getReturnType(), nullptr));
+ originalTypeIsF128(MF.getFunction().getReturnType(), nullptr));
OriginalArgWasFloat.push_back(
- MF.getFunction()->getReturnType()->isFloatingPointTy());
+ MF.getFunction().getReturnType()->isFloatingPointTy());
}
}
@@ -149,7 +149,7 @@
const SmallVectorImpl<ISD::InputArg> &Ins) {
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Ins.size(); ++i) {
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
// SRet arguments cannot originate from f128 or {f128} returns so we just
// push false. We have to handle this specially since SRet arguments
@@ -161,7 +161,7 @@
continue;
}
- assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+ assert(Ins[i].getOrigArgIndex() < MF.getFunction().arg_size());
std::advance(FuncArg, Ins[i].getOrigArgIndex());
OriginalArgWasF128.push_back(
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index 4dad98b..a9abc17 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -1661,7 +1661,7 @@
int64_t V = Literal.getImm();
DEBUG(dbgs() << "literal " << V << "\n");
Type *Int32Ty =
- Type::getInt32Ty(MF->getFunction()->getContext());
+ Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, V);
unsigned index = MCP->getConstantPoolIndex(C, 4);
I->getOperand(2).ChangeToImmediate(index);
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 4acae99..6448fd9 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -3359,10 +3359,10 @@
MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
- const Function *Func = DAG.getMachineFunction().getFunction();
- Function::const_arg_iterator FuncArg = Func->arg_begin();
+ const Function &Func = DAG.getMachineFunction().getFunction();
+ Function::const_arg_iterator FuncArg = Func.arg_begin();
- if (Func->hasFnAttribute("interrupt") && !Func->arg_empty())
+ if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
report_fatal_error(
"Functions with the interrupt attribute cannot have arguments!");
@@ -3600,7 +3600,7 @@
// the sret argument into $v0 for the return. We saved the argument into
// a virtual register in the entry block, so now we copy the value out
// and into $v0.
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned Reg = MipsFI->getSRetReturnReg();
@@ -3622,7 +3622,7 @@
RetOps.push_back(Flag);
// ISRs must use "eret".
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt"))
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
return LowerInterruptReturn(RetOps, DL, DAG);
// Standard return on Mips is a "jr $ra"
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index d0dc043..0e0d822 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -93,8 +93,8 @@
const MCPhysReg *
MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MipsSubtarget &Subtarget = MF->getSubtarget<MipsSubtarget>();
- const Function *F = MF->getFunction();
- if (F->hasFnAttribute("interrupt")) {
+ const Function &F = MF->getFunction();
+ if (F.hasFnAttribute("interrupt")) {
if (Subtarget.hasMips64())
return Subtarget.hasMips64r6() ? CSR_Interrupt_64R6_SaveList
: CSR_Interrupt_64_SaveList;
@@ -238,7 +238,7 @@
Reserved.set(Mips::RA_64);
Reserved.set(Mips::T0);
Reserved.set(Mips::T1);
- if (MF.getFunction()->hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
+ if (MF.getFunction().hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
Reserved.set(Mips::S2);
}
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index 5d4fbff..eb1eea7 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -434,7 +434,7 @@
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
- if (MF.getFunction()->hasFnAttribute("interrupt"))
+ if (MF.getFunction().hasFnAttribute("interrupt"))
emitInterruptPrologueStub(MF, MBB);
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
@@ -582,7 +582,7 @@
// Perform ISR handling like GCC
StringRef IntKind =
- MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
+ MF.getFunction().getFnAttribute("interrupt").getValueAsString();
const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;
// EIC interrupt handling needs to read the Cause register to disable
@@ -726,7 +726,7 @@
}
}
- if (MF.getFunction()->hasFnAttribute("interrupt"))
+ if (MF.getFunction().hasFnAttribute("interrupt"))
emitInterruptEpilogueStub(MF, MBB);
// Get the number of bytes from FrameInfo
@@ -809,8 +809,8 @@
// spilled to the stack frame.
bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
Reg == Mips::HI0 || Reg == Mips::HI0_64);
- const Function *Func = MBB.getParent()->getFunction();
- if (IsLOHI && Func->hasFnAttribute("interrupt")) {
+ const Function &Func = MBB.getParent()->getFunction();
+ if (IsLOHI && Func.hasFnAttribute("interrupt")) {
DebugLoc DL = MI->getDebugLoc();
unsigned Op = 0;
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 3c6a7d7..893cae9 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -161,7 +161,7 @@
// lui $v0, %hi(%neg(%gp_rel(fname)))
// daddu $v1, $v0, $t9
// daddiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
- const GlobalValue *FName = MF.getFunction();
+ const GlobalValue *FName = &MF.getFunction();
BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0)
@@ -190,7 +190,7 @@
// lui $v0, %hi(%neg(%gp_rel(fname)))
// addu $v1, $v0, $t9
// addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
- const GlobalValue *FName = MF.getFunction();
+ const GlobalValue *FName = &MF.getFunction();
BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9);
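
The two GlobalBaseReg hunks above need the '&' for a subtler reason than usual: llvm::Function derives from GlobalValue, and addGlobalAddress() takes a const GlobalValue *. A Function * converted to the base pointer implicitly; a Function & does not convert to a pointer at all, so the upcast now starts with an explicit address-of. Stand-in sketch:

  struct GlobalValue {};
  struct Function : GlobalValue {};

  struct MachineFunction {
    Function F;
    const Function &getFunction() const { return F; }
  };

  // Stand-in for MachineInstrBuilder::addGlobalAddress(const GlobalValue *, ...).
  void addGlobalAddress(const GlobalValue *GV) { (void)GV; }

  int main() {
    MachineFunction MF;
    // Function* -> GlobalValue* is an implicit derived-to-base conversion,
    // but it has to begin from a pointer, hence '&' on the reference:
    const GlobalValue *FName = &MF.getFunction();
    addGlobalAddress(FName);
  }
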
@@ -1247,7 +1247,7 @@
// handled by the ldi case.
if (ResNonZero) {
IntegerType *Int32Ty =
- IntegerType::get(MF->getFunction()->getContext(), 32);
+ IntegerType::get(MF->getFunction().getContext(), 32);
const ConstantInt *Const32 = ConstantInt::get(Int32Ty, 32);
SDValue Ops[4] = {HiResNonZero ? SDValue(HiRes, 0) : Zero64Val,
CurDAG->getConstant(*Const32, DL, MVT::i32),
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index 798d866..59b7679 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -231,8 +231,8 @@
// Hi, Lo are normally caller save but they are callee save
// for interrupt handling.
- const Function *Func = MBB.getParent()->getFunction();
- if (Func->hasFnAttribute("interrupt")) {
+ const Function &Func = MBB.getParent()->getFunction();
+ if (Func.hasFnAttribute("interrupt")) {
if (Mips::HI32RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Mips::MFHI), Mips::K0);
SrcReg = Mips::K0;
@@ -262,8 +262,8 @@
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
- const Function *Func = MBB.getParent()->getFunction();
- bool ReqIndirectLoad = Func->hasFnAttribute("interrupt") &&
+ const Function &Func = MBB.getParent()->getFunction();
+ bool ReqIndirectLoad = Func.hasFnAttribute("interrupt") &&
(DestReg == Mips::LO0 || DestReg == Mips::LO0_64 ||
DestReg == Mips::HI0 || DestReg == Mips::HI0_64);
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 9a12b98..85193bf 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -200,7 +200,7 @@
void MipsTargetMachine::resetSubtarget(MachineFunction *MF) {
DEBUG(dbgs() << "resetSubtarget\n");
- Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(*MF->getFunction()));
+ Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(MF->getFunction()));
MF->setSubtarget(Subtarget);
}
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index d0b47f6..2aa3956 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -457,8 +457,8 @@
void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
raw_ostream &O) {
- const Function *F = MF.getFunction();
- printReturnValStr(F, O);
+ const Function &F = MF.getFunction();
+ printReturnValStr(&F, O);
}
// Return true if MBB is the header of a loop marked with
@@ -502,13 +502,13 @@
raw_svector_ostream O(Str);
if (!GlobalsEmitted) {
- emitGlobals(*MF->getFunction()->getParent());
+ emitGlobals(*MF->getFunction().getParent());
GlobalsEmitted = true;
}
// Set up
MRI = &MF->getRegInfo();
- F = MF->getFunction();
+ F = &MF->getFunction();
emitLinkageDirective(F, O);
if (isKernelFunction(*F))
O << ".entry ";
@@ -536,7 +536,7 @@
SmallString<128> Str;
raw_svector_ostream O(Str);
- emitDemotedVars(MF->getFunction(), O);
+ emitDemotedVars(&MF->getFunction(), O);
OutStreamer->EmitRawText(O.str());
}
@@ -1708,8 +1708,8 @@
void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
raw_ostream &O) {
- const Function *F = MF.getFunction();
- emitFunctionParamList(F, O);
+ const Function &F = MF.getFunction();
+ emitFunctionParamList(&F, O);
}
void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
@@ -2156,7 +2156,7 @@
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
CE->printAsOperand(OS, /*PrintType=*/false,
- !MF ? nullptr : MF->getFunction()->getParent());
+ !MF ? nullptr : MF->getFunction().getParent());
report_fatal_error(OS.str());
}
@@ -2170,7 +2170,7 @@
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
CE->printAsOperand(OS, /*PrintType=*/ false,
- !MF ? nullptr : MF->getFunction()->getParent());
+ !MF ? nullptr : MF->getFunction().getParent());
report_fatal_error(OS.str());
}
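
NVPTXAsmPrinter keeps two pointer-shaped consumers: overloads such as printReturnValStr(const Function *, raw_ostream &) and the long-lived F member, so the reference is converted back with '&' at the boundary instead of widening those signatures in the same patch. The shape of that boundary, with stand-in types:

  #include <iostream>
  #include <string>

  struct Function { std::string Name; };

  struct MachineFunction {
    Function Fn;
    const Function &getFunction() const { return Fn; }
  };

  // Stand-in for an overload that still takes a pointer.
  void printName(const Function *F) { std::cout << F->Name << '\n'; }

  struct Printer {
    const Function *F = nullptr;  // pointer member, as in NVPTXAsmPrinter
    void begin(const MachineFunction &MF) {
      F = &MF.getFunction();  // re-seat the member from the reference
      printName(F);
    }
  };

  int main() {
    MachineFunction MF{{"kernel_fn"}};
    Printer P;
    P.begin(MF);
  }

Narrowing the patch this way keeps it NFC; converting those helpers to take references can land separately.
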
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 714260d..57e2acc 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1003,7 +1003,7 @@
return true;
// Load wasn't explicitly invariant. Attempt to infer invariance.
- if (!isKernelFunction(*F->getFunction()))
+ if (!isKernelFunction(F->getFunction()))
return false;
// We use GetUnderlyingObjects() here instead of
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 2138189..f1e4251 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -123,10 +123,10 @@
// If nvptx-f32ftz is used on the command-line, always honor it
return FtzEnabled;
} else {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// Otherwise, check for an nvptx-f32ftz attribute on the function
- if (F->hasFnAttribute("nvptx-f32ftz"))
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ if (F.hasFnAttribute("nvptx-f32ftz"))
+ return F.getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
else
return false;
}
@@ -2329,7 +2329,7 @@
const DataLayout &DL = DAG.getDataLayout();
auto PtrVT = getPointerTy(DAG.getDataLayout());
- const Function *F = MF.getFunction();
+ const Function *F = &MF.getFunction();
const AttributeList &PAL = F->getAttributes();
const TargetLowering *TLI = STI.getTargetLowering();
@@ -2525,7 +2525,7 @@
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- Type *RetTy = MF.getFunction()->getReturnType();
+ Type *RetTy = MF.getFunction().getReturnType();
bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
@@ -4022,9 +4022,9 @@
return true;
// Allow unsafe math if unsafe-fp-math attribute explicitly says so.
- const Function *F = MF.getFunction();
- if (F->hasFnAttribute("unsafe-fp-math")) {
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ const Function &F = MF.getFunction();
+ if (F.hasFnAttribute("unsafe-fp-math")) {
+ Attribute Attr = F.getFnAttribute("unsafe-fp-math");
StringRef Val = Attr.getValueAsString();
if (Val == "true")
return true;
diff --git a/lib/Target/NVPTX/NVPTXPeephole.cpp b/lib/Target/NVPTX/NVPTXPeephole.cpp
index 415889d..02c32c6 100644
--- a/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -125,7 +125,7 @@
}
bool NVPTXPeephole::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
diff --git a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
index 2022cac..82befe4 100644
--- a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
+++ b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -158,7 +158,7 @@
unsigned Param = atoi(Sym.data()+ParamBaseName.size());
std::string NewSym;
raw_string_ostream NewSymStr(NewSym);
- NewSymStr << MF.getFunction()->getName() << "_param_" << Param;
+ NewSymStr << MF.getName() << "_param_" << Param;
InstrsToRemove.insert(&TexHandleDef);
Idx = MFI->getImageHandleSymbolIndex(NewSymStr.str().c_str());
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 545f0aa..1745190 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -507,7 +507,7 @@
MCInst TmpInst;
bool isPPC64 = Subtarget->isPPC64();
bool isDarwin = TM.getTargetTriple().isOSDarwin();
- const Module *M = MF->getFunction()->getParent();
+ const Module *M = MF->getFunction().getParent();
PICLevel::Level PL = M->getPICLevel();
// Lower multi-instruction pseudo operations.
@@ -1228,7 +1228,7 @@
// linux/ppc32 - Normal entry label.
if (!Subtarget->isPPC64() &&
(!isPositionIndependent() ||
- MF->getFunction()->getParent()->getPICLevel() == PICLevel::SmallPIC))
+ MF->getFunction().getParent()->getPICLevel() == PICLevel::SmallPIC))
return AsmPrinter::EmitFunctionEntryLabel();
if (!Subtarget->isPPC64()) {
diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 48b94a5..32d801b 100644
--- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -714,7 +714,7 @@
bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()) || MF.empty())
+ if (skipFunction(MF.getFunction()) || MF.empty())
return false;
bool didSomething = false;
diff --git a/lib/Target/PowerPC/PPCEarlyReturn.cpp b/lib/Target/PowerPC/PPCEarlyReturn.cpp
index 811e4dd..1699463 100644
--- a/lib/Target/PowerPC/PPCEarlyReturn.cpp
+++ b/lib/Target/PowerPC/PPCEarlyReturn.cpp
@@ -173,7 +173,7 @@
public:
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 0a01fdf..c870a22 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -434,7 +434,7 @@
const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned LR = RegInfo->getRARegister();
- bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
+ bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
!MustSaveLR(MF, LR) && // No need to save LR.
@@ -499,7 +499,7 @@
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -692,7 +692,7 @@
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
DebugLoc dl;
bool needsCFI = MMI.hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ MF.getFunction().needsUnwindTableEntry();
// Get processor type.
bool isPPC64 = Subtarget.isPPC64();
@@ -1505,7 +1505,7 @@
unsigned RetOpcode = MBBI->getOpcode();
if (MF.getTarget().Options.GuaranteedTailCallOpt &&
(RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
- MF.getFunction()->getCallingConv() == CallingConv::Fast) {
+ MF.getFunction().getCallingConv() == CallingConv::Fast) {
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
unsigned CallerAllocatedAmt = FI->getMinReservedArea();
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 5e7a933..d3a223f 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -391,7 +391,7 @@
// Insert the set of GlobalBaseReg into the first MBB of the function
MachineBasicBlock &FirstMBB = MF->front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- const Module *M = MF->getFunction()->getParent();
+ const Module *M = MF->getFunction().getParent();
DebugLoc dl;
if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index ac864ba..18e567f 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2573,7 +2573,7 @@
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool is64bit = Subtarget.isPPC64();
- const Module *M = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *M = DAG.getMachineFunction().getFunction().getParent();
PICLevel::Level picLevel = M->getPICLevel();
TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
@@ -3542,7 +3542,7 @@
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
unsigned &QFPR_idx = FPR_idx;
SmallVector<SDValue, 8> MemOps;
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
@@ -3986,7 +3986,7 @@
SmallVector<SDValue, 8> MemOps;
unsigned nAltivecParamsAtEnd = 0;
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
@@ -4422,9 +4422,9 @@
// Variadic argument functions are not supported.
if (isVarArg) return false;
- auto *Caller = DAG.getMachineFunction().getFunction();
+ auto &Caller = DAG.getMachineFunction().getFunction();
// Check that the calling conventions are compatible for tco.
- if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), CalleeCC))
+ if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
return false;
// Caller contains any byval parameter is not supported.
@@ -4446,7 +4446,7 @@
// If the caller and callee potentially have different TOC bases then we
// cannot tail call since we need to restore the TOC pointer after the call.
// ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
- if (!callsShareTOCBase(Caller, Callee, getTargetMachine()))
+ if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
return false;
// TCO allows altering callee ABI, so we don't have to check further.
@@ -4458,7 +4458,7 @@
// If callee use the same argument list that caller is using, then we can
// apply SCO on this case. If it is not, then we need to check if callee needs
// stack for passing arguments.
- if (!hasSameArgumentList(Caller, CS) &&
+ if (!hasSameArgumentList(&Caller, CS) &&
needStackSlotPassParameters(Subtarget, Outs)) {
return false;
}
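
PPCISelLowering's tail-call check also covers the auto case: auto *Caller becomes auto &Caller, and since llvm::Function is non-copyable, writing plain auto here would fail to compile rather than silently copy. The pointer-taking helpers (callsShareTOCBase, hasSameArgumentList) then receive &Caller. A stand-in sketch of both points:

  struct Function {
    Function() = default;
    Function(const Function &) = delete;  // llvm::Function is likewise non-copyable
    int CC = 0;
    int getCallingConv() const { return CC; }
  };

  struct MachineFunction {
    Function F;
    const Function &getFunction() const { return F; }
  };

  bool hasSameArgumentList(const Function *A, const Function *B) { return A == B; }

  int main() {
    MachineFunction MF;
    // 'auto Caller = MF.getFunction();' would invoke the deleted copy
    // constructor and not compile, so the binding must be a reference:
    auto &Caller = MF.getFunction();
    (void)Caller.getCallingConv();
    // Helpers still written against pointers take the address:
    return hasSameArgumentList(&Caller, &MF.getFunction()) ? 0 : 1;
  }
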
@@ -4483,7 +4483,7 @@
return false;
MachineFunction &MF = DAG.getMachineFunction();
- CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
// Functions containing by val parameters are not supported.
for (unsigned i = 0; i != Ins.size(); i++) {
@@ -4735,7 +4735,7 @@
// we're building with the leopard linker or later, which automatically
// synthesizes these stubs.
const TargetMachine &TM = DAG.getTarget();
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
const GlobalValue *GV = nullptr;
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
GV = G->getGlobal();
@@ -5028,7 +5028,7 @@
// any other variadic arguments).
Ops.insert(std::next(Ops.begin()), AddTOC);
} else if (CallOpc == PPCISD::CALL &&
- !callsShareTOCBase(MF.getFunction(), Callee, DAG.getTarget())) {
+ !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
// Otherwise insert NOP for non-local calls.
CallOpc = PPCISD::CALL_NOP;
}
@@ -9797,7 +9797,7 @@
// Naked functions never have a base pointer, and so we use r1. For all
// other functions, this decision must be delayed until during PEI.
unsigned BaseReg;
- if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF->getFunction().hasFnAttribute(Attribute::Naked))
BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
else
BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
@@ -13251,7 +13251,7 @@
// Naked functions never have a frame pointer, and so we use r1. For all
// other functions, this decision must be delayed until during PEI.
unsigned FrameReg;
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
else
FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
@@ -13495,12 +13495,12 @@
bool MemcpyStrSrc,
MachineFunction &MF) const {
if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// When expanding a memset, require at least two QPX instructions to cover
// the cost of loading the value to be stored from the constant pool.
if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
(!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
return MVT::v4f64;
}
@@ -13719,7 +13719,7 @@
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index cd843e3..b119e5b 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -586,8 +586,8 @@
bool supportSplitCSR(MachineFunction *MF) const override {
return
- MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 031e668..ffb5cc8 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3133,7 +3133,7 @@
const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
// We check the ZExt/SExt flags for a method parameter.
if (MI.getParent()->getBasicBlock() ==
- &MF->getFunction()->getEntryBlock()) {
+ &MF->getFunction().getEntryBlock()) {
unsigned VReg = MI.getOperand(0).getReg();
if (MF->getRegInfo().isLiveIn(VReg))
return SignExt ? FuncInfo->isLiveInSExt(VReg) :
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index 64c5e4e..27ded63 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -106,7 +106,7 @@
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
initialize(MF);
return simplifyCode();
diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index df0e9f3..d524c35 100644
--- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -56,7 +56,7 @@
}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()) || !RunPreEmitPeephole)
+ if (skipFunction(MF.getFunction()) || !RunPreEmitPeephole)
return false;
bool Changed = false;
const PPCInstrInfo *TII = MF.getSubtarget<PPCSubtarget>().getInstrInfo();
diff --git a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
index 544c7f2..25b2b54 100644
--- a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
+++ b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -60,7 +60,7 @@
}
bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool MadeChange = false;
diff --git a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
index 7ad50a6..5b2d719 100644
--- a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
+++ b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
@@ -211,7 +211,7 @@
MachineInstr *lookThroughCRCopy(unsigned Reg, unsigned &Subreg,
MachineInstr *&CpDef);
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If the subtarget doesn't use CR bits, there's nothing to do.
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 78467e8..6b62a82 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -123,7 +123,7 @@
const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
- if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) {
+ if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
if (Subtarget.hasVSX())
return CSR_64_AllRegs_VSX_SaveList;
if (Subtarget.hasAltivec())
@@ -161,7 +161,7 @@
return nullptr;
if (!TM.isPPC64())
return nullptr;
- if (MF->getFunction()->getCallingConv() != CallingConv::CXX_FAST_TLS)
+ if (MF->getFunction().getCallingConv() != CallingConv::CXX_FAST_TLS)
return nullptr;
if (!MF->getInfo<PPCFunctionInfo>()->isSplitCSR())
return nullptr;
@@ -901,7 +901,7 @@
// Naked functions have stack size 0, although getStackSize may not reflect
// that because we didn't call all the pieces that compute it for naked
// functions.
- if (!MF.getFunction()->hasFnAttribute(Attribute::Naked)) {
+ if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
if (!(hasBasePointer(MF) && FrameIndex < 0))
Offset += MFI.getStackSize();
}
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 04fa358..f15af79 100644
--- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -343,7 +343,7 @@
public:
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we don't have VSX then go ahead and return without doing
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 0320eca..8a5fb9f 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -191,7 +191,7 @@
public:
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we don't have VSX on the subtarget, don't do anything.
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index d011ec8..d9548ff 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -264,7 +264,7 @@
unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
// If the function returns a struct, copy the SRetReturnReg to I0
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
unsigned Reg = SFI->getSRetReturnReg();
if (!Reg)
@@ -519,7 +519,7 @@
InVals.push_back(Load);
}
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
// Copy the SRet Argument to SRetReturnReg.
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
unsigned Reg = SFI->getSRetReturnReg();
@@ -701,8 +701,8 @@
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Function *Fn = DAG.getMachineFunction().getFunction();
- const Module *M = Fn->getParent();
+ const Function &F = DAG.getMachineFunction().getFunction();
+ const Module *M = F.getParent();
const char *CalleeName = E->getSymbol();
CalleeFn = M->getFunction(CalleeName);
}
@@ -1057,8 +1057,8 @@
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Function *Fn = DAG.getMachineFunction().getFunction();
- const Module *M = Fn->getParent();
+ const Function &F = DAG.getMachineFunction().getFunction();
+ const Module *M = F.getParent();
const char *CalleeName = E->getSymbol();
CalleeFn = M->getFunction(CalleeName);
if (!CalleeFn && isFP128ABICall(CalleeName))
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
index ca82740..55f7a7b 100644
--- a/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -593,7 +593,7 @@
}
bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp
index 3183c3a..b600aa6 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -71,7 +71,7 @@
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
bool HasFP = hasFP(MF);
SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
- bool IsVarArg = MF.getFunction()->isVarArg();
+ bool IsVarArg = MF.getFunction().isVarArg();
// va_start stores incoming FPR varargs in the normal way, but delegates
// the saving of incoming GPR varargs to spillCalleeSavedRegisters().
@@ -139,7 +139,7 @@
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
- bool IsVarArg = MF.getFunction()->isVarArg();
+ bool IsVarArg = MF.getFunction().isVarArg();
DebugLoc DL;
// Scan the call-saved GPRs and find the bounds of the register spill area.
@@ -374,7 +374,7 @@
uint64_t StackSize = getAllocatedStackSize(MF);
if (StackSize) {
// Determine if we want to store a backchain.
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
// If we need backchain, save current stack pointer. R1 is free at this
// point.
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index c239cd5..adf3683 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3039,8 +3039,8 @@
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
MachineFunction &MF = DAG.getMachineFunction();
- bool RealignOpt = !MF.getFunction()-> hasFnAttribute("no-realign-stack");
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
@@ -3572,7 +3572,7 @@
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue NewSP = Op.getOperand(1);
diff --git a/lib/Target/SystemZ/SystemZLDCleanup.cpp b/lib/Target/SystemZ/SystemZLDCleanup.cpp
index 0f75943..f532e9e 100644
--- a/lib/Target/SystemZ/SystemZLDCleanup.cpp
+++ b/lib/Target/SystemZ/SystemZLDCleanup.cpp
@@ -64,7 +64,7 @@
}
bool SystemZLDCleanup::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index 173f5b4..856505e 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -109,7 +109,7 @@
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_SaveList;
return CSR_SystemZ_SaveList;
@@ -119,7 +119,7 @@
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(
+ MF.getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_RegMask;
return CSR_SystemZ_RegMask;
diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp
index d9c8fab..195fa20 100644
--- a/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -309,7 +309,7 @@
}
bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
const SystemZSubtarget &ST = F.getSubtarget<SystemZSubtarget>();
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 4881928..71526dd 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -63,7 +63,7 @@
SetupMachineFunction(MF);
if (Subtarget->isTargetCOFF()) {
- bool Local = MF.getFunction()->hasLocalLinkage();
+ bool Local = MF.getFunction().hasLocalLinkage();
OutStreamer->BeginCOFFSymbolDef(CurrentFnSym);
OutStreamer->EmitCOFFSymbolStorageClass(
Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL);
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
index b420279..522dc79 100644
--- a/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -148,7 +148,7 @@
// is a danger of that being generated.
if (STI->isTargetDarwin() &&
(!MF.getLandingPads().empty() ||
- (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
+ (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
return false;
// It is not valid to change the stack pointer outside the prolog/epilog
@@ -243,7 +243,7 @@
assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
Log2SlotSize = Log2_32(SlotSize);
- if (skipFunction(*MF.getFunction()) || !isLegal(MF))
+ if (skipFunction(MF.getFunction()) || !isLegal(MF))
return false;
unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
diff --git a/lib/Target/X86/X86CallLowering.cpp b/lib/Target/X86/X86CallLowering.cpp
index 3e1f340..ccb982f 100644
--- a/lib/Target/X86/X86CallLowering.cpp
+++ b/lib/Target/X86/X86CallLowering.cpp
@@ -177,7 +177,7 @@
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = MF.getDataLayout();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
ArgInfo OrigArg{VReg, Val->getType()};
setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
@@ -334,7 +334,7 @@
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
diff --git a/lib/Target/X86/X86CmovConversion.cpp b/lib/Target/X86/X86CmovConversion.cpp
index a4bb989..489d9d8 100644
--- a/lib/Target/X86/X86CmovConversion.cpp
+++ b/lib/Target/X86/X86CmovConversion.cpp
@@ -164,7 +164,7 @@
}
bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (!EnableCmovConverter)
return false;
diff --git a/lib/Target/X86/X86DomainReassignment.cpp b/lib/Target/X86/X86DomainReassignment.cpp
index f32fb9c..f9e1ac3 100644
--- a/lib/Target/X86/X86DomainReassignment.cpp
+++ b/lib/Target/X86/X86DomainReassignment.cpp
@@ -678,7 +678,7 @@
}
bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (DisableX86DomainReassignment)
return false;
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index 5dfd95f..ab2ef26 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -222,7 +222,7 @@
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn()));
+ MBB.getParent()->getFunction().getPersonalityFn()));
X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
MBBI->eraseFromParent();
return true;
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
index 2f7dd58..01d10fe 100644
--- a/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -146,12 +146,12 @@
FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
- if (!FixupBWInsts || skipFunction(*MF.getFunction()))
+ if (!FixupBWInsts || skipFunction(MF.getFunction()))
return false;
this->MF = &MF;
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
- OptForSize = MF.getFunction()->optForSize();
+ OptForSize = MF.getFunction().optForSize();
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index d27974f..b41bf99 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -191,12 +191,12 @@
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
- if (skipFunction(*Func.getFunction()))
+ if (skipFunction(Func.getFunction()))
return false;
MF = &Func;
const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
- OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
+ OptIncDec = !ST.slowIncDec() || Func.getFunction().optForMinSize();
OptLEA = ST.LEAusesAG() || ST.slowLEA() || ST.slow3OpsLEA();
if (!OptLEA && !OptIncDec)
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index b73a088..9a72e71 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -349,7 +349,7 @@
// In regcall convention, some FP registers may not be passed through
// the stack, so they will need to be assigned to the stack first
- if ((Entry->getParent()->getFunction()->getCallingConv() ==
+ if ((Entry->getParent()->getFunction().getCallingConv() ==
CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) {
// In the register calling convention, up to one FP argument could be
// saved in the first FP register.
@@ -973,7 +973,7 @@
unsigned R = MO.getReg() - X86::FP0;
if (R < 8) {
- if (MF->getFunction()->getCallingConv() != CallingConv::X86_RegCall) {
+ if (MF->getFunction().getCallingConv() != CallingConv::X86_RegCall) {
assert(MO.isDef() && MO.isImplicit());
}
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index ead877a..80b1cc1 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -148,8 +148,7 @@
const X86RegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
- const Function *F = MF->getFunction();
- if (!F || MF->callsEHReturn())
+ if (MF->callsEHReturn())
return 0;
const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
@@ -820,7 +819,7 @@
const MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
unsigned StackAlign = getStackAlignment();
- if (MF.getFunction()->hasFnAttribute("stackrealign")) {
+ if (MF.getFunction().hasFnAttribute("stackrealign")) {
if (MFI.hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
else if (MaxAlign < SlotSize)
@@ -935,28 +934,28 @@
"MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &Fn = MF.getFunction();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
- if (Fn->hasPersonalityFn())
- Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ if (Fn.hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn.getPersonalityFn());
bool FnHasClrFunclet =
MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
bool HasFP = hasFP(MF);
- bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
+ bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWin64CFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
+ bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
// FIXME: Emit FPO data for EH funclets.
bool NeedsWinFPO =
!IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
bool NeedsDwarfCFI =
- !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
@@ -982,16 +981,16 @@
// The default stack probe size is 4096 if the function has no stackprobesize
// attribute.
unsigned StackProbeSize = 4096;
- if (Fn->hasFnAttribute("stack-probe-size"))
- Fn->getFnAttribute("stack-probe-size")
+ if (Fn.hasFnAttribute("stack-probe-size"))
+ Fn.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
// Re-align the stack on 64-bit if the x86-interrupt calling convention is
// used and an error code was pushed, since the x86-64 ABI requires a 16-byte
// stack alignment.
- if (Fn->getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
- Fn->arg_size() == 2) {
+ if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
+ Fn.arg_size() == 2) {
StackSize += 8;
MFI.setStackSize(StackSize);
emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false);
@@ -1002,7 +1001,7 @@
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) &&
!TRI->needsStackRealignment(MF) &&
!MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
@@ -1447,7 +1446,7 @@
// 1. The interrupt handling function uses any of the "rep" instructions.
// 2. Interrupt handling function calls another function.
//
- if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ if (Fn.getCallingConv() == CallingConv::X86_INTR)
BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
.setMIFlag(MachineInstr::FrameSetup);
@@ -1508,7 +1507,7 @@
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
- classifyEHPersonality(MF.getFunction()->getPersonalityFn());
+ classifyEHPersonality(MF.getFunction().getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
// CLR funclets need to hold enough space to include the PSPSym, at the
// same offset from the stack pointer (immediately after the prolog) as it
@@ -1551,7 +1550,7 @@
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWin64CFI =
- IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
+ IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
// Get the number of bytes to allocate from the FrameInfo.
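
The emitPrologue hunk above binds the reference once (const Function &Fn = MF.getFunction();) and reuses it for the dozen attribute and calling-convention queries that follow. Unlike the pointer it replaces, the reference cannot be reseated or null, so a single binding at the top of a long function is safe for its whole duration. Stand-in sketch:

  #include <iostream>
  #include <string>

  struct Function {
    std::string Name;
    bool hasFnAttribute(const std::string &Kind) const { return Kind == "backchain"; }
  };

  struct MachineFunction {
    Function F;
    const Function &getFunction() const { return F; }
  };

  int main() {
    MachineFunction MF{{"emitPrologue"}};
    // Bind once, query many times; every later use reads the same Function.
    const Function &Fn = MF.getFunction();
    bool Realign = Fn.hasFnAttribute("stackrealign");
    bool Backchain = Fn.hasFnAttribute("backchain");
    std::cout << Realign << ' ' << Backchain << '\n';
  }
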
@@ -1981,7 +1980,7 @@
MachineInstr *CatchRet) const {
// SEH shouldn't use catchret.
assert(!isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn())) &&
+ MBB.getParent()->getFunction().getPersonalityFn())) &&
"SEH should not use CATCHRET");
DebugLoc DL = CatchRet->getDebugLoc();
MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
@@ -2021,9 +2020,9 @@
// Don't restore CSRs before an SEH catchret. SEH except blocks do not form
// funclets. emitEpilogue transforms these to normal jumps.
if (MI->getOpcode() == X86::CATCHRET) {
- const Function *Func = MBB.getParent()->getFunction();
+ const Function &F = MBB.getParent()->getFunction();
bool IsSEH = isAsynchronousEHPersonality(
- classifyEHPersonality(Func->getPersonalityFn()));
+ classifyEHPersonality(F.getPersonalityFn()));
if (IsSEH)
return true;
}
@@ -2095,8 +2094,8 @@
static bool
HasNestArgument(const MachineFunction *MF) {
- const Function *F = MF->getFunction();
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ const Function &F = MF->getFunction();
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; I++) {
if (I->hasNestAttr())
return true;
@@ -2110,7 +2109,7 @@
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
- CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
// Erlang stuff.
if (CallingConvention == CallingConv::HiPE) {
@@ -2160,7 +2159,7 @@
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
- if (MF.getFunction()->isVarArg())
+ if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
!STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
@@ -2434,8 +2433,8 @@
Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
- unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
- MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
+ MF.getFunction().arg_size() - CCRegisteredArgs : 0;
unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
assert(STI.isTargetLinux() &&
@@ -2649,10 +2648,10 @@
Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool DwarfCFI = !WindowsCFI &&
- (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ bool DwarfCFI = !WindowsCFI &&
+ (MMI.hasDebugInfo() || F.needsUnwindTableEntry());
// If we have any exception handlers in this function, and we adjust
// the SP before calls, we may need to indicate this to the unwinder
@@ -2694,7 +2693,7 @@
StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
if (StackAdjustment) {
- if (!(Fn->optForMinSize() &&
+ if (!(F.optForMinSize() &&
adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
/*InEpilogue=*/false);
@@ -2767,13 +2766,13 @@
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// If we may need to emit frameless compact unwind information, give
// up as this is currently broken: PR25614.
- return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
+ return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
// The lowering of segmented stack and HiPE only support entry blocks
// as prologue blocks: PR26107.
// This limitation may be lifted if we fix:
// - adjustForSegmentedStacks
// - adjustForHiPEPrologue
- MF.getFunction()->getCallingConv() != CallingConv::HiPE &&
+ MF.getFunction().getCallingConv() != CallingConv::HiPE &&
!MF.shouldSplitStack();
}
@@ -3003,9 +3002,9 @@
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
if (!STI.is64Bit() || !MF.hasEHFunclets() ||
- classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
+ classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
return;
// Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 8df8098..a6c7c5f 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -619,8 +619,8 @@
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptFor[Min]Size are used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->optForSize();
- OptForMinSize = MF->getFunction()->optForMinSize();
+ OptForSize = MF->getFunction().optForSize();
+ OptForMinSize = MF->getFunction().optForMinSize();
assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
@@ -753,9 +753,9 @@
void X86DAGToDAGISel::EmitFunctionEntryCode() {
// If this is main, emit special code for main.
- if (const Function *Fn = MF->getFunction())
- if (Fn->hasExternalLinkage() && Fn->getName() == "main")
- emitSpecialCodeForMain();
+ const Function &F = MF->getFunction();
+ if (F.hasExternalLinkage() && F.getName() == "main")
+ emitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 94714bf..43971c3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -94,7 +94,7 @@
const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Msg, dl.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
@@ -1843,8 +1843,8 @@
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ const Function &F = MF.getFunction();
+ if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -1940,7 +1940,7 @@
if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
return;
unsigned ParamRegs = 0;
- if (auto *M = MF->getFunction()->getParent())
+ if (auto *M = MF->getFunction().getParent())
ParamRegs = M->getNumberRegisterParameters();
// Mark the first N int arguments as having reg
@@ -2207,7 +2207,7 @@
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
CallConv == CallingConv::X86_RegCall ||
- MF.getFunction()->hasFnAttribute("no_caller_saved_registers");
+ MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
report_fatal_error("X86 interrupts may not return any value");
@@ -2889,8 +2889,8 @@
return None;
}
- const Function *Fn = MF.getFunction();
- bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = MF.getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2923,10 +2923,9 @@
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
- const Function *Fn = MF.getFunction();
- if (Fn->hasExternalLinkage() &&
- Subtarget.isTargetCygMing() &&
- Fn->getName() == "main")
+ const Function &F = MF.getFunction();
+ if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
+ F.getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -3101,7 +3100,7 @@
// Figure out if XMM registers are in use.
assert(!(Subtarget.useSoftFloat() &&
- Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
// 64-bit calling conventions support varargs and register parameters, so we
@@ -3258,7 +3257,7 @@
FuncInfo->setArgumentStackSize(StackSize);
if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
- EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
assert(Is64Bit);
// TODO: Add a mechanism to frame lowering that will allow us to indicate
@@ -3275,7 +3274,7 @@
}
if (CallConv == CallingConv::X86_RegCall ||
- Fn->hasFnAttribute("no_caller_saved_registers")) {
+ F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
MRI.disableCalleeSavedRegister(Pair.first);
@@ -3366,7 +3365,7 @@
StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
@@ -3401,7 +3400,7 @@
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
- MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
+ MF.getFunction().hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
@@ -3747,7 +3746,7 @@
}
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlags =
Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
@@ -3796,10 +3795,10 @@
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
- const Function *CallerFn = MF.getFunction();
+ const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
- CallerFn->hasPersonalityFn()
- ? classifyEHPersonality(CallerFn->getPersonalityFn())
+ CallerFn.hasPersonalityFn()
+ ? classifyEHPersonality(CallerFn.getPersonalityFn())
: EHPersonality::Unknown;
if (isFuncletEHPersonality(Pers))
Mask = RegInfo->getNoPreservedMask();
@@ -4047,15 +4046,15 @@
// If -tailcallopt is specified, make fastcc functions tail-callable.
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
+ const Function &CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
// perform a tailcall optimization here.
- if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+ if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
@@ -4639,7 +4638,7 @@
const SelectionDAG &DAG) const {
// Do not merge to float value size (128 bits) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat) {
@@ -6927,7 +6926,7 @@
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
@@ -14903,7 +14902,7 @@
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
@@ -15044,7 +15043,7 @@
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
auto PtrVT = getPointerTy(DAG.getDataLayout());
@@ -16968,7 +16967,7 @@
// An add of one will be selected as an INC.
if (C->isOne() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
@@ -16977,7 +16976,7 @@
// An add of negative one (subtract of one) will be selected as a DEC.
if (C->isAllOnesValue() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -17172,7 +17171,7 @@
// with an immediate. 16 bit immediates are to be avoided.
if ((Op0.getValueType() == MVT::i16 &&
(isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
- !DAG.getMachineFunction().getFunction()->optForMinSize() &&
+ !DAG.getMachineFunction().getFunction().optForMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19242,8 +19241,8 @@
if (Is64Bit) {
// The 64 bit implementation of segmented stacks needs to clobber both r10
// and r11. This makes it impossible to use it along with nested parameters.
- const Function *F = MF.getFunction();
- for (const auto &A : F->args()) {
+ const Function &F = MF.getFunction();
+ for (const auto &A : F.args()) {
if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
@@ -19290,7 +19289,7 @@
SDLoc DL(Op);
if (!Subtarget.is64Bit() ||
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
@@ -19344,7 +19343,7 @@
assert(Op.getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
@@ -19375,7 +19374,7 @@
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!Subtarget.useSoftFloat() &&
- !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
Subtarget.hasSSE1());
}
@@ -19403,7 +19402,7 @@
// where a va_list is still an i8*.
assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
if (Subtarget.isCallingConvWin64(
- DAG.getMachineFunction().getFunction()->getCallingConv()))
+ DAG.getMachineFunction().getFunction().getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
@@ -23939,7 +23938,7 @@
if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
// Convert to inc/dec if they aren't slow or we are optimizing for size.
if (AllowIncDec && (!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
if ((NewOpc == X86ISD::LADD && C->isOne()) ||
(NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
@@ -26085,7 +26084,7 @@
int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
- if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
+ if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -26728,7 +26727,7 @@
DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
- classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
+ classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
@@ -26755,7 +26754,7 @@
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const Constant *PerFn = MF->getFunction()->getPersonalityFn();
+ const Constant *PerFn = MF->getFunction().getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
if (IsSEH && Subtarget.is32Bit()) {
@@ -32161,7 +32160,7 @@
// pmulld is supported since SSE41. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
- bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
@@ -32354,7 +32353,7 @@
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -33572,7 +33571,7 @@
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
@@ -34512,8 +34511,8 @@
if (VT.getSizeInBits() != 64)
return SDValue();
- const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = DAG.getMachineFunction().getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
!Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
@@ -35388,7 +35387,7 @@
// This takes at least 3 instructions, so favor a library call when operating
// on a scalar and minimizing code size.
- if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
SDValue Op0 = N->getOperand(0);
@@ -38403,7 +38402,7 @@
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
@@ -38426,8 +38425,8 @@
/// string if not applicable.
StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
// If the function specifically requests stack probes, emit them.
- if (MF.getFunction()->hasFnAttribute("probe-stack"))
- return MF.getFunction()->getFnAttribute("probe-stack").getValueAsString();
+ if (MF.getFunction().hasFnAttribute("probe-stack"))
+ return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 24a6cf4..4db969b 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -1228,8 +1228,8 @@
const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a4ddb31..a246359 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -7726,7 +7726,7 @@
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsDwarfCFI =
!IsWin64Prologue &&
- (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+ (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
if (EmitCFI) {
TFL->BuildCFI(MBB, I, DL,
@@ -8409,7 +8409,7 @@
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isSlowTwoMemOps && !MF.getFunction()->optForMinSize() &&
+ if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
@@ -8417,7 +8417,7 @@
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
unsigned NumOps = MI.getDesc().getNumOperands();
@@ -8586,7 +8586,7 @@
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Don't fold subreg spills, or reloads that use a high subreg.
@@ -8785,7 +8785,7 @@
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Determine the alignment of the load.
@@ -8881,16 +8881,16 @@
Type *Ty;
unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
- Ty = Type::getFloatTy(MF.getFunction()->getContext());
+ Ty = Type::getFloatTy(MF.getFunction().getContext());
else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
- Ty = Type::getDoubleTy(MF.getFunction()->getContext());
+ Ty = Type::getDoubleTy(MF.getFunction().getContext());
else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),16);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
else
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
Opc == X86::AVX512_512_SETALLONES ||
@@ -10691,7 +10691,7 @@
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
@@ -10852,16 +10852,16 @@
bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// Does the function use a red zone? If it does, then we can't risk messing
// with the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If we *don't* want to outline from things that could potentially be deduped
// then return false.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// This function is viable for outlining, so return true.
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 7bc67c7..42e89cb 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -918,11 +918,11 @@
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def OptForSize : Predicate<"MF->getFunction()->optForSize()">;
- def OptForMinSize : Predicate<"MF->getFunction()->optForMinSize()">;
- def OptForSpeed : Predicate<"!MF->getFunction()->optForSize()">;
+ def OptForSize : Predicate<"MF->getFunction().optForSize()">;
+ def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
+ def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
- "MF->getFunction()->optForSize()">;
+ "MF->getFunction().optForSize()">;
}
def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 0b77014..1fc6f07 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -672,7 +672,7 @@
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
+ if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
@@ -696,7 +696,7 @@
// Remove redundant address calculations. Do it only for -Os/-Oz since only
// a code size gain is expected from this part of the pass.
- if (MF.getFunction()->optForSize())
+ if (MF.getFunction().optForSize())
Changed |= removeRedundantAddrCalc(LEAs);
}
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index f2ee437..1da0fad 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -96,10 +96,10 @@
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
- if (MF.getFunction()->optForSize()) {
+ if (MF.getFunction().optForSize()) {
return false;
}
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index d690035..bc31e95 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -218,13 +218,13 @@
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (IsWin64 || (F && F->getCallingConv() == CallingConv::Win64))
+ const Function &F = MF.getFunction();
+ if (IsWin64 || F.getCallingConv() == CallingConv::Win64)
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
- bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
+ bool hasHipeCC = F.getCallingConv() == CallingConv::HiPE;
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
@@ -266,17 +266,17 @@
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->callsEHReturn();
- CallingConv::ID CC = F->getCallingConv();
+ CallingConv::ID CC = F.getCallingConv();
// If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
// convention because it has the CSR list.
- if (MF->getFunction()->hasFnAttribute("no_caller_saved_registers"))
+ if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
CC = CallingConv::X86_INTR;
switch (CC) {
@@ -362,7 +362,7 @@
if (Is64Bit) {
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_SaveList
: CSR_64_SwiftError_SaveList;
@@ -380,7 +380,7 @@
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -473,9 +473,9 @@
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
@@ -519,7 +519,7 @@
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index d006556..1e04997 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -247,7 +247,7 @@
Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
if (Repeats.BytesLeft() > 0 &&
- DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ DAG.getMachineFunction().getFunction().optForMinSize()) {
// When aggressively optimizing for size, avoid generating the code to
// handle BytesLeft.
Repeats.AVT = MVT::i8;
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 0b67e81..2242628 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -285,7 +285,7 @@
TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
- IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
+ IsX86INTR = MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
bool FnHasLiveInYmmOrZmm = checkFnHasLiveInYmmOrZmm(MRI);
diff --git a/lib/Target/X86/X86WinAllocaExpander.cpp b/lib/Target/X86/X86WinAllocaExpander.cpp
index 8a186e9..1046696 100644
--- a/lib/Target/X86/X86WinAllocaExpander.cpp
+++ b/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -279,9 +279,9 @@
SlotSize = TRI->getSlotSize();
StackProbeSize = 4096;
- if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ if (MF.getFunction().hasFnAttribute("stack-probe-size")) {
MF.getFunction()
- ->getFnAttribute("stack-probe-size")
+ .getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
}
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 3d8712d..62b2c8e 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -238,7 +238,7 @@
report_fatal_error("emitPrologue unsupported alignment: "
+ Twine(MFI.getMaxAlignment()));
- const AttributeList &PAL = MF.getFunction()->getAttributes();
+ const AttributeList &PAL = MF.getFunction().getAttributes();
if (PAL.hasAttrSomewhere(Attribute::Nest))
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
// FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().
@@ -324,7 +324,7 @@
if (XFI->hasEHSpillSlot()) {
// The unwinder requires stack slot & CFI offsets for the exception info.
// We do not save/spill these registers.
- const Function *Fn = MF.getFunction();
+ const Function *Fn = &MF.getFunction();
const Constant *PersonalityFn =
Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
SmallVector<StackSlotInfo, 2> SpillList;
@@ -359,7 +359,7 @@
if (RetOpcode == XCore::EH_RETURN) {
// 'Restore' the exception info the unwinder has placed into the stack
// slots.
- const Function *Fn = MF.getFunction();
+ const Function *Fn = &MF.getFunction();
const Constant *PersonalityFn =
Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
SmallVector<StackSlotInfo, 2> SpillList;
@@ -542,7 +542,7 @@
const MachineRegisterInfo &MRI = MF.getRegInfo();
bool LRUsed = MRI.isPhysRegModified(XCore::LR);
- if (!LRUsed && !MF.getFunction()->isVarArg() &&
+ if (!LRUsed && !MF.getFunction().isVarArg() &&
MF.getFrameInfo().estimateStackSize(MF))
// If we need to extend the stack it is more efficient to use entsp / retsp.
// We force the LR to be saved so these instructions are used.
diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp
index 7a9c6fc..c885332 100644
--- a/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -443,7 +443,7 @@
}
MachineConstantPool *ConstantPool = MBB.getParent()->getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Value);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Value);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
return BuildMI(MBB, MI, dl, get(XCore::LDWCP_lru6), Reg)
.addConstantPoolIndex(Idx)
diff --git a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
index 35089fa..b7b0daa 100644
--- a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
+++ b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
@@ -39,7 +39,7 @@
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
- if (! MF.getFunction()->isVarArg()) {
+ if (!MF.getFunction().isVarArg()) {
// A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
LRSpillSlot = MFI.CreateFixedObject(TRI.getSpillSize(RC), 0, true);
} else {
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index a6cf683..70376d4 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -204,8 +204,7 @@
}
bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
- return MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry();
}
const MCPhysReg *