//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"
using namespace clang;
using namespace CodeGen;
namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
/// VTables - All the vtables which have been defined.
llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
/// All the thread wrapper functions that have been used.
llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
ThreadWrappers;
protected:
bool UseARMMethodPtrABI;
bool UseARMGuardVarABI;
bool Use32BitVTableOffsetABI;
ItaniumMangleContext &getMangleContext() {
return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
}
public:
ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
bool UseARMMethodPtrABI = false,
bool UseARMGuardVarABI = false) :
CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
UseARMGuardVarABI(UseARMGuardVarABI),
Use32BitVTableOffsetABI(false) { }
bool classifyReturnType(CGFunctionInfo &FI) const override;
RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
// If C++ prohibits us from making a copy, pass by address.
if (!RD->canPassInRegisters())
return RAA_Indirect;
return RAA_Default;
}
bool isThisCompleteObject(GlobalDecl GD) const override {
// The Itanium ABI has separate complete-object vs. base-object
// variants of both constructors and destructors.
if (isa<CXXDestructorDecl>(GD.getDecl())) {
switch (GD.getDtorType()) {
case Dtor_Complete:
case Dtor_Deleting:
return true;
case Dtor_Base:
return false;
case Dtor_Comdat:
llvm_unreachable("emitting dtor comdat as function?");
}
llvm_unreachable("bad dtor kind");
}
if (isa<CXXConstructorDecl>(GD.getDecl())) {
switch (GD.getCtorType()) {
case Ctor_Complete:
return true;
case Ctor_Base:
return false;
case Ctor_CopyingClosure:
case Ctor_DefaultClosure:
llvm_unreachable("closure ctors in Itanium ABI?");
case Ctor_Comdat:
llvm_unreachable("emitting ctor comdat as function?");
}
llvm_unreachable("bad dtor kind");
}
// No other kinds.
return false;
}
bool isZeroInitializable(const MemberPointerType *MPT) override;
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
CGCallee
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const Expr *E,
Address This,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr,
const MemberPointerType *MPT) override;
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
Address Base,
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) override;
llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) override;
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) override;
llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
CharUnits ThisAdjustment);
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L, llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) override;
llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *Addr,
const MemberPointerType *MPT) override;
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) override;
void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty,
QualType CatchHandlerType) override {
return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
}
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
llvm::Value *
GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
CXXDtorType DT) const override {
// Itanium does not emit any destructor variant as an inline thunk.
// Delegating may occur as an optimization, but all variants are either
// emitted with external linkage or as linkonce if they are inline and used.
return false;
}
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
FunctionArgList &Params) override;
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type,
bool ForVirtualBase,
bool Delegating) override;
llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type,
bool ForVirtualBase,
bool Delegating) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This,
QualType ThisTy) override;
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
CodeGenFunction::VPtr Vptr) override;
bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
return true;
}
llvm::Constant *
getVTableAddressPoint(BaseSubobject Base,
const CXXRecordDecl *VTableClass) override;
llvm::Value *getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
llvm::Value *getVTableAddressPointInStructorWithVTT(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase);
llvm::Constant *
getVTableAddressPointForConstExpr(BaseSubobject Base,
const CXXRecordDecl *VTableClass) override;
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType, Address This,
DeleteOrMemberCallExpr E) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
bool ReturnAdjustment) override {
// Allow inlining of thunks by emitting them with available_externally
// linkage together with vtables when needed.
if (ForVTable && !Thunk->hasLocalLinkage())
Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
CGM.setGVProperties(Thunk, GD);
}
bool exportThunk() override { return true; }
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
FunctionArgList &Args) const override {
assert(!Args.empty() && "expected the arglist to not be empty!");
return Args.size() - 1;
}
StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
StringRef GetDeletedVirtualCallName() override {
return "__cxa_deleted_virtual";
}
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
Address InitializeArrayCookie(CodeGenFunction &CGF,
Address NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) override;
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) override;
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee dtor,
llvm::Constant *addr) override;
llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
llvm::Value *Val);
void EmitThreadLocalInitFuncs(
CodeGenModule &CGM,
ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool mayNeedDestruction(const VarDecl *VD) const {
if (VD->needsDestruction(getContext()))
return true;
// If the variable has an incomplete class type (or array thereof), it
// might need destruction.
const Type *T = VD->getType()->getBaseElementTypeUnsafe();
if (T->getAs<RecordType>() && T->isIncompleteType())
return true;
return false;
}
/// Determine whether we will definitely emit this variable with a constant
/// initializer, either because the language semantics demand it or because
/// we know that the initializer is a constant.
// For weak definitions, any initializer available in the current translation
// unit is not necessarily reflective of the initializer used; such
// initializers are ignored unless InspectInitForWeakDef is true.
bool
isEmittedWithConstantInitializer(const VarDecl *VD,
bool InspectInitForWeakDef = false) const {
VD = VD->getMostRecentDecl();
if (VD->hasAttr<ConstInitAttr>())
return true;
// All later checks examine the initializer specified on the variable. If
// the variable is weak, such examination would not be correct.
if (!InspectInitForWeakDef &&
(VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
return false;
const VarDecl *InitDecl = VD->getInitializingDeclaration();
if (!InitDecl)
return false;
// If there's no initializer to run, this is constant initialization.
if (!InitDecl->hasInit())
return true;
// If we have the only definition, we don't need a thread wrapper if we
// will emit the value as a constant.
if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
// Otherwise, we need a thread wrapper unless we know that every
// translation unit will emit the value as a constant. We rely on the
// variable being constant-initialized in every translation unit if it's
// constant-initialized in any translation unit, which isn't actually
// guaranteed by the standard but is necessary for sanity.
return InitDecl->hasConstantInitialization();
}
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
return !isEmittedWithConstantInitializer(VD) ||
mayNeedDestruction(VD);
}
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
bool NeedsVTTParameter(GlobalDecl GD) override;
/**************************** RTTI Uniqueness ******************************/
protected:
/// Returns true if the ABI requires RTTI type_info objects to be unique
/// across a program.
virtual bool shouldRTTIBeUnique() const { return true; }
public:
/// What sort of unique-RTTI behavior should we use?
enum RTTIUniquenessKind {
/// We are guaranteeing, or need to guarantee, that the RTTI string
/// is unique.
RUK_Unique,
/// We are not guaranteeing uniqueness for the RTTI string, so we
/// can demote to hidden visibility but must use string comparisons.
RUK_NonUniqueHidden,
/// We are not guaranteeing uniqueness for the RTTI string, so we
/// have to use string comparisons, but we also have to emit it with
/// non-hidden visibility.
RUK_NonUniqueVisible
};
/// Return the required visibility status for the given type and linkage in
/// the current ABI.
RTTIUniquenessKind
classifyRTTIUniqueness(QualType CanTy,
llvm::GlobalValue::LinkageTypes Linkage) const;
friend class ItaniumRTTIBuilder;
void emitCXXStructor(GlobalDecl GD) override;
std::pair<llvm::Value *, const CXXRecordDecl *>
LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) override;
private:
bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
const auto &VtableLayout =
CGM.getItaniumVTableContext().getVTableLayout(RD);
for (const auto &VtableComponent : VtableLayout.vtable_components()) {
// Skip empty slot.
if (!VtableComponent.isUsedFunctionPointerKind())
continue;
const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
if (!Method->getCanonicalDecl()->isInlined())
continue;
StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
auto *Entry = CGM.GetGlobalValue(Name);
// This checks whether the virtual inline function has already been emitted.
// Note that it is possible that this inline function is emitted only after
// we try to emit the vtable speculatively. Because of this we do an extra
// pass after emitting all deferred vtables to find and emit these vtables
// opportunistically.
if (!Entry || Entry->isDeclaration())
return true;
}
return false;
}
bool isVTableHidden(const CXXRecordDecl *RD) const {
const auto &VtableLayout =
CGM.getItaniumVTableContext().getVTableLayout(RD);
for (const auto &VtableComponent : VtableLayout.vtable_components()) {
if (VtableComponent.isRTTIKind()) {
const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
return true;
} else if (VtableComponent.isUsedFunctionPointerKind()) {
const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
if (Method->getVisibility() == Visibility::HiddenVisibility &&
!Method->isDefined())
return true;
}
}
return false;
}
};
class ARMCXXABI : public ItaniumCXXABI {
public:
ARMCXXABI(CodeGen::CodeGenModule &CGM) :
ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true) {}
bool HasThisReturn(GlobalDecl GD) const override {
return (isa<CXXConstructorDecl>(GD.getDecl()) || (
isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() != Dtor_Deleting));
}
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
QualType ResTy) override;
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
Address InitializeArrayCookie(CodeGenFunction &CGF,
Address NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
CharUnits cookieSize) override;
};
class AppleARM64CXXABI : public ARMCXXABI {
public:
AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
Use32BitVTableOffsetABI = true;
}
// ARM64 libraries are prepared for non-unique RTTI.
bool shouldRTTIBeUnique() const override { return false; }
};
class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
: ItaniumCXXABI(CGM) {}
private:
bool HasThisReturn(GlobalDecl GD) const override {
return isa<CXXConstructorDecl>(GD.getDecl()) ||
(isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() != Dtor_Deleting);
}
};
class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
: ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true) {}
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) override;
private:
bool HasThisReturn(GlobalDecl GD) const override {
return isa<CXXConstructorDecl>(GD.getDecl()) ||
(isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() != Dtor_Deleting);
}
bool canCallMismatchedFunctionType() const override { return false; }
};
class XLCXXABI final : public ItaniumCXXABI {
public:
explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
: ItaniumCXXABI(CGM) {}
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee dtor,
llvm::Constant *addr) override;
bool useSinitAndSterm() const override { return true; }
private:
void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
llvm::Constant *addr);
};
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
switch (CGM.getContext().getCXXABIKind()) {
// For IR-generation purposes, there's no significant difference
// between the ARM and iOS ABIs.
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::WatchOS:
return new ARMCXXABI(CGM);
case TargetCXXABI::AppleARM64:
return new AppleARM64CXXABI(CGM);
case TargetCXXABI::Fuchsia:
return new FuchsiaCXXABI(CGM);
// Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
// include the other 32-bit ARM oddities: constructor/destructor return values
// and array cookies.
case TargetCXXABI::GenericAArch64:
return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true);
case TargetCXXABI::GenericMIPS:
return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
case TargetCXXABI::WebAssembly:
return new WebAssemblyCXXABI(CGM);
case TargetCXXABI::XL:
return new XLCXXABI(CGM);
case TargetCXXABI::GenericItanium:
if (CGM.getContext().getTargetInfo().getTriple().getArch()
== llvm::Triple::le32) {
// For PNaCl, use ARM-style method pointers so that PNaCl code
// does not assume anything about the alignment of function
// pointers.
return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
}
return new ItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
llvm_unreachable("Microsoft ABI is not Itanium-based");
}
llvm_unreachable("bad ABI kind");
}
llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
if (MPT->isMemberDataPointer())
return CGM.PtrDiffTy;
return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}
/// In the Itanium and ARM ABIs, method pointers have the form:
/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
/// - the this-adjustment is (memptr.adj)
/// - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
/// - method pointers are virtual if (memptr.adj & 1) is nonzero
/// - the this-adjustment is (memptr.adj >> 1)
/// - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
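///
/// For example (an illustrative sketch, assuming 8-byte ptrdiff_t and
/// pointer-sized vtable slots), given:
///   struct A { void f(); virtual void g(); };  // g at vtable index 0
/// the Itanium encoding of &A::f is { (ptrdiff_t)&A::f, 0 } and of &A::g
/// is { 0*8 + 1, 0 }, while the ARM encoding of &A::g is { 0*8, 2*0 + 1 }.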
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
CGBuilderTy &Builder = CGF.Builder;
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
auto *RD =
cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
// Extract memptr.adj, which is in the second field.
llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
// Compute the true adjustment.
llvm::Value *Adj = RawAdj;
if (UseARMMethodPtrABI)
Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
// Apply the adjustment and cast back to the original struct type
// for consistency.
llvm::Value *This = ThisAddr.getPointer();
llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
ThisPtrForCall = This;
// Load the function pointer.
llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
// If the LSB in the function pointer is 1, the function pointer points to
// a virtual function.
llvm::Value *IsVirtual;
if (UseARMMethodPtrABI)
IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
else
IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
// In the virtual path, the adjustment left 'This' pointing to the
// vtable of the correct base subobject. The "function pointer" is an
// offset within the vtable (+1 for the virtual flag on non-ARM).
CGF.EmitBlock(FnVirtual);
// Cast the adjusted this to a pointer to vtable pointer and load.
llvm::Type *VTableTy = Builder.getInt8PtrTy();
CharUnits VTablePtrAlign =
CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
CGF.getPointerAlign());
llvm::Value *VTable =
CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
// Apply the offset.
// On ARM64, to reserve extra space in virtual member function pointers,
// we only pay attention to the low 32 bits of the offset.
llvm::Value *VTableOffset = FnAsInt;
if (!UseARMMethodPtrABI)
VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
if (Use32BitVTableOffsetABI) {
VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
}
// Check the address of the function pointer if CFI on member function
// pointers is enabled.
llvm::Constant *CheckSourceLocation;
llvm::Constant *CheckTypeDesc;
bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitWPDInfo =
CGM.getCodeGenOpts().WholeProgramVTables &&
// Don't insert type tests if we are forcing public std visibility.
!CGM.HasLTOVisibilityPublicStd(RD);
llvm::Value *VirtualFn = nullptr;
{
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *TypeId = nullptr;
llvm::Value *CheckResult = nullptr;
if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
// If doing CFI, VFE or WPD, we will need the metadata node to check
// against.
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
}
if (ShouldEmitVFEInfo) {
llvm::Value *VFPAddr =
Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
// If doing VFE, load from the vtable with a type.checked.load intrinsic
// call. Note that we use the GEP to calculate the address to load from
// and pass 0 as the offset to the intrinsic. This is because every
// vtable slot of the correct type is marked with matching metadata, and
// we know that the load must be from one of these slots.
llvm::Value *CheckedLoad = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
{VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
"memptr.virtualfn");
} else {
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
llvm::Value *VFPAddr =
Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
CheckResult = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test),
{Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
}
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
VirtualFn = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative,
{VTableOffset->getType()}),
{VTable, VTableOffset});
VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
VFPAddr = CGF.Builder.CreateBitCast(
VFPAddr, FTy->getPointerTo()->getPointerTo());
VirtualFn = CGF.Builder.CreateAlignedLoad(
FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
"memptr.virtualfn");
}
}
assert(VirtualFn && "Virtual function pointer not created!");
assert((!(ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) ||
CheckResult) &&
"Check result required but not created!");
if (ShouldEmitCFICheck) {
// If doing CFI, emit the check.
CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
CheckSourceLocation,
CheckTypeDesc,
};
if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
} else {
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
CGM.getLLVMContext(),
llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
llvm::Value *ValidVtable = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
SanitizerHandler::CFICheckFail, StaticData,
{VTable, ValidVtable});
}
FnVirtual = Builder.GetInsertBlock();
}
} // End of sanitizer scope
CGF.EmitBranch(FnEnd);
// In the non-virtual path, the function pointer is actually a
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
if (RD->hasDefinition()) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
CheckSourceLocation,
CheckTypeDesc,
};
llvm::Value *Bit = Builder.getFalse();
llvm::Value *CastedNonVirtualFn =
Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
getContext().getMemberPointerType(
MPT->getPointeeType(),
getContext().getRecordType(Base).getTypePtr()));
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
llvm::Value *TypeTest =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
{CastedNonVirtualFn, TypeId});
Bit = Builder.CreateOr(Bit, TypeTest);
}
CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
SanitizerHandler::CFICheckFail, StaticData,
{CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
FnNonVirtual = Builder.GetInsertBlock();
}
}
// We're done.
CGF.EmitBlock(FnEnd);
llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
CGCallee Callee(FPT, CalleePtr);
return Callee;
}
/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MemPtr->getType() == CGM.PtrDiffTy);
CGBuilderTy &Builder = CGF.Builder;
// Cast to char*.
Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
// Apply the offset, which we assume is non-null.
llvm::Value *Addr = Builder.CreateInBoundsGEP(
Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
->getPointerTo(Base.getAddressSpace());
return Builder.CreateBitCast(Addr, PType);
}
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///   <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point     ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
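///
/// As a concrete sketch, assuming 4-byte ints and no padding:
///   struct B1 { int x; };
///   struct B2 { int y; };
///   struct D : B1, B2 {};  // B2 subobject assumed at offset 4 within D
/// converting &B2::y (offset 0) to int D::* yields offset 0 + 4 = 4, while
/// the null value -1 is passed through unchanged by the null check below.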
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *src) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer ||
E->getCastKind() == CK_ReinterpretMemberPointer);
// Under Itanium, reinterprets don't require any additional processing.
if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
// Use constant emission if we can.
if (isa<llvm::Constant>(src))
return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
llvm::Constant *adj = getMemberPointerAdjustment(E);
if (!adj) return src;
CGBuilderTy &Builder = CGF.Builder;
bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
const MemberPointerType *destTy =
E->getType()->castAs<MemberPointerType>();
// For member data pointers, this is just a matter of adding the
// offset if the source is non-null.
if (destTy->isMemberDataPointer()) {
llvm::Value *dst;
if (isDerivedToBase)
dst = Builder.CreateNSWSub(src, adj, "adj");
else
dst = Builder.CreateNSWAdd(src, adj, "adj");
// Null check.
llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
return Builder.CreateSelect(isNull, src, dst);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (UseARMMethodPtrABI) {
uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
offset <<= 1;
adj = llvm::ConstantInt::get(adj->getType(), offset);
}
llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
llvm::Value *dstAdj;
if (isDerivedToBase)
dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
else
dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
return Builder.CreateInsertValue(src, dstAdj, 1);
}
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *src) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer ||
E->getCastKind() == CK_ReinterpretMemberPointer);
// Under Itanium, reinterprets don't require any additional processing.
if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
// If the adjustment is trivial, we don't need to do anything.
llvm::Constant *adj = getMemberPointerAdjustment(E);
if (!adj) return src;
bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
const MemberPointerType *destTy =
E->getType()->castAs<MemberPointerType>();
// For member data pointers, this is just a matter of adding the
// offset if the source is non-null.
if (destTy->isMemberDataPointer()) {
// null maps to null.
if (src->isAllOnesValue()) return src;
if (isDerivedToBase)
return llvm::ConstantExpr::getNSWSub(src, adj);
else
return llvm::ConstantExpr::getNSWAdd(src, adj);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (UseARMMethodPtrABI) {
uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
offset <<= 1;
adj = llvm::ConstantInt::get(adj->getType(), offset);
}
llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
llvm::Constant *dstAdj;
if (isDerivedToBase)
dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
else
dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
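// For example, a null 'int A::*' is the all-ones value (ptrdiff_t)-1,
// which keeps 0 free to mean "member at offset 0"; a null member function
// pointer is the { ptr = 0, adj = 0 } pair built below.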
if (MPT->isMemberDataPointer())
return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
llvm::Constant *Values[2] = { Zero, Zero };
return llvm::ConstantStruct::getAnon(Values);
}
llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) {
// Itanium C++ ABI 2.3:
// A pointer to data member is an offset from the base address of
// the class object containing it, represented as a ptrdiff_t
return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return BuildMemberPointer(MD, CharUnits::Zero());
}
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
CharUnits ThisAdjustment) {
assert(MD->isInstance() && "Member function must not be static!");
CodeGenTypes &Types = CGM.getTypes();
// Get the function pointer (or index if this is a virtual function).
llvm::Constant *MemPtr[2];
if (MD->isVirtual()) {
uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
uint64_t VTableOffset;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Multiply by 4-byte relative offsets.
VTableOffset = Index * 4;
} else {
const ASTContext &Context = getContext();
CharUnits PointerWidth = Context.toCharUnitsFromBits(
Context.getTargetInfo().getPointerWidth(0));
VTableOffset = Index * PointerWidth.getQuantity();
}
if (UseARMMethodPtrABI) {
// ARM C++ ABI 3.2.1:
// This ABI specifies that adj contains twice the this
// adjustment, plus 1 if the member function is virtual. The
// least significant bit of adj then makes exactly the same
// discrimination as the least significant bit of ptr does for
// Itanium.
MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
2 * ThisAdjustment.getQuantity() + 1);
} else {
// Itanium C++ ABI 2.3:
// For a virtual function, [the pointer field] is 1 plus the
// virtual table offset (in bytes) of the function,
// represented as a ptrdiff_t.
MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
ThisAdjustment.getQuantity());
}
} else {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
Ty = CGM.PtrDiffTy;
}
llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
(UseARMMethodPtrABI ? 2 : 1) *
ThisAdjustment.getQuantity());
}
return llvm::ConstantStruct::getAnon(MemPtr);
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
QualType MPType) {
const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
const ValueDecl *MPD = MP.getMemberPointerDecl();
if (!MPD)
return EmitNullMemberPointer(MPT);
CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
return BuildMemberPointer(MD, ThisAdjustment);
CharUnits FieldOffset =
getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) {
CGBuilderTy &Builder = CGF.Builder;
llvm::ICmpInst::Predicate Eq;
llvm::Instruction::BinaryOps And, Or;
if (Inequality) {
Eq = llvm::ICmpInst::ICMP_NE;
And = llvm::Instruction::Or;
Or = llvm::Instruction::And;
} else {
Eq = llvm::ICmpInst::ICMP_EQ;
And = llvm::Instruction::And;
Or = llvm::Instruction::Or;
}
// Member data pointers are easy because there's a unique null
// value, so it just comes down to bitwise equality.
if (MPT->isMemberDataPointer())
return Builder.CreateICmp(Eq, L, R);
// For member function pointers, the tautologies are more complex.
// The Itanium tautology is:
// (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
// The ARM tautology is:
// (L == R) <==> (L.ptr == R.ptr &&
// (L.adj == R.adj ||
// (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
// The inequality tautologies have exactly the same structure, but with
// De Morgan's laws applied.
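// As a worked Itanium example: two null pointers compare equal even if
// their adj fields differ, since L.ptr == R.ptr == 0 short-circuits the
// adj test; two pointers to the same non-virtual function reached through
// different bases share ptr but differ in adj, and so compare unequal.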
llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
// This condition tests whether L.ptr == R.ptr. This must always be
// true for equality to hold.
llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
// This condition, together with the assumption that L.ptr == R.ptr,
// tests whether the pointers are both null. ARM imposes an extra
// condition.
llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
// This condition tests whether L.adj == R.adj. If this isn't
// true, the pointers are unequal unless they're both null.
llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
// Null member function pointers on ARM clear the low bit of Adj,
// so the zero condition has to check that neither low bit is set.
if (UseARMMethodPtrABI) {
llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
// Compute (l.adj | r.adj) & 1 and test it against zero.
llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
"cmp.or.adj");
EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
}
// Tie together all our conditions.
llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
Result = Builder.CreateBinOp(And, PtrEq, Result,
Inequality ? "memptr.ne" : "memptr.eq");
return Result;
}
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
CGBuilderTy &Builder = CGF.Builder;
/// For member data pointers, this is just a check against -1.
if (MPT->isMemberDataPointer()) {
assert(MemPtr->getType() == CGM.PtrDiffTy);
llvm::Value *NegativeOne =
llvm::Constant::getAllOnesValue(MemPtr->getType());
return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
}
// In Itanium, a member function pointer is not null if 'ptr' is not null.
llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
// On ARM, a member function pointer is also non-null if the low bit of 'adj'
// (the virtual bit) is set.
if (UseARMMethodPtrABI) {
llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
"memptr.isvirtual");
Result = Builder.CreateOr(Result, IsVirtual);
}
return Result;
}
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
if (!RD)
return false;
// If C++ prohibits us from making a copy, return by address.
if (!RD->canPassInRegisters()) {
auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
}
return false;
}
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
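/// For example, a zero-initialized 'int A::*' would denote the member at
/// offset 0 rather than the null value -1, so data member pointers are not
/// zero-initializable, while the { 0, 0 } null member function pointer is.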
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
return MPT->isMemberFunctionPointer();
}
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
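///
/// A sketch of the (non-relative) vtable prefix this relies on, with any
/// virtual-base offsets sitting above these entries:
///   [-2] offset-to-top: offset from this subobject to the complete object
///   [-1] std::type_info pointer for RTTI
///   [ 0] first virtual function slot (the address point the vptr holds)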
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
bool UseGlobalDelete = DE->isGlobalDelete();
if (UseGlobalDelete) {
// Derive the complete-object pointer, which is what we need
// to pass to the deallocation function.
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl =
cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
llvm::Value *VTable =
CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(
CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
// Apply the offset.
llvm::Value *CompletePtr =
CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
CompletePtr =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
// If we're supposed to call the global delete, make sure we do so
// even if the destructor throws.
CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
ElementType);
}
// FIXME: Provide a source location here even though there's no
// CXXMemberCallExpr for dtor call.
CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
if (UseGlobalDelete)
CGF.PopCleanupBlock();
}
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
// void __cxa_rethrow();
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
else
CGF.EmitRuntimeCallOrInvoke(Fn);
}
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
// void *__cxa_allocate_exception(size_t thrown_size);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
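// As a sketch of the protocol implemented below, 'throw E();' becomes
// roughly:
//   void *p = __cxa_allocate_exception(sizeof(E));
//   /* construct the E object into p */
//   __cxa_throw(p, /*tinfo=*/&typeid(E), /*dest=*/~E, or null if trivial);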
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
QualType ThrowType = E->getSubExpr()->getType();
// Now allocate the exception object.
llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
/*ForEH=*/true);
// The address of the destructor. If the exception type has a
// trivial destructor (or isn't a record), we just pass null.
llvm::Constant *Dtor = nullptr;
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
CXXDestructorDecl *DtorD = Record->getDestructor();
Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
}
}
if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
// void *__dynamic_cast(const void *sub,
// const abi::__class_type_info *src,
// const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
// Mark the function as nounwind readonly.
llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
llvm::Attribute::ReadOnly };
llvm::AttributeList Attrs = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
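/// For example (a sketch): for 'struct D : public B', the hint for a
/// B -> D cast is B's offset within D. It is -1 if some path runs through
/// a virtual base (no hint is possible), -2 if B is not a public base of
/// D at all, and -3 if B occurs along more than one public path.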
static CharUnits computeOffsetHint(ASTContext &Context,
const CXXRecordDecl *Src,
const CXXRecordDecl *Dst) {
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
// If Dst is not derived from Src we can skip the whole computation below and
// return that Src is not a public base of Dst. Record all inheritance paths.
if (!Dst->isDerivedFrom(Src, Paths))
return CharUnits::fromQuantity(-2ULL);
unsigned NumPublicPaths = 0;
CharUnits Offset;
// Now walk all possible inheritance paths.
for (const CXXBasePath &Path : Paths) {
if (Path.Access != AS_public) // Ignore non-public inheritance.
continue;
++NumPublicPaths;
for (const CXXBasePathElement &PathElement : Path) {
// If the path contains a virtual base class we can't give any hint.
// -1: no hint.
if (PathElement.Base->isVirtual())
return CharUnits::fromQuantity(-1ULL);
if (NumPublicPaths > 1) // Won't use offsets, skip computation.
continue;
// Accumulate the base class offsets.
const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
Offset += L.getBaseClassOffset(
PathElement.Base->getType()->getAsCXXRecordDecl());
}
}
// -2: Src is not a public base of Dst.
if (NumPublicPaths == 0)
return CharUnits::fromQuantity(-2ULL);
// -3: Src is a multiple public base type but never a virtual base type.
if (NumPublicPaths > 1)
return CharUnits::fromQuantity(-3ULL);
// Otherwise, the Src type is a unique public nonvirtual base type of Dst.
// Return the offset of Src from the origin of Dst.
return Offset;
}
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
QualType SrcRecordTy) {
return IsDeref;
}
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
llvm::Value *Value =
CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the type info.
Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
Value = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
// Set up to dereference again since this is a proxy we accessed.
Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
} else {
// Load the type info.
Value =
CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
}
return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
CGF.getPointerAlign());
}
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) {
return SrcIsPtr;
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
llvm::Value *DestRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
// Compute the offset hint.
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
llvm::Value *OffsetHint = llvm::ConstantInt::get(
PtrDiffLTy,
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
Value = CGF.Builder.CreateBitCast(Value, DestLTy);
/// C++ [expr.dynamic.cast]p9:
/// A failed cast to reference type throws std::bad_cast
if (DestTy->isReferenceType()) {
llvm::BasicBlock *BadCastBlock =
CGF.createBasicBlock("dynamic_cast.bad_cast");
llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
CGF.EmitBlock(BadCastBlock);
EmitBadCastCall(CGF);
}
return Value;
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy,
QualType DestTy) {
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
OffsetToTop = CGF.Builder.CreateAlignedLoad(
CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
} else {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Get the vtable pointer.
llvm::Value *VTable =
CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
OffsetToTop = CGF.Builder.CreateAlignedLoad(
PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
return CGF.Builder.CreateBitCast(Value, DestLTy);
}
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
llvm::FunctionCallee Fn = getBadCastFn(CGF);
llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
return true;
}
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
CharUnits VBaseOffsetOffset =
CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
BaseClassDecl);
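// In the Itanium layout, virtual base offsets are stored in the vtable
// above the address point (alongside offset-to-top and RTTI), so the
// offset-of-the-offset computed here is negative.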
llvm::Value *VBaseOffsetPtr =
CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
"vbase.offset.ptr");
llvm::Value *VBaseOffset;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
VBaseOffsetPtr =
CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
"vbase.offset");
} else {
VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
CGM.PtrDiffTy->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
}
return VBaseOffset;
}
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
// Just make sure we're in sync with TargetCXXABI.
assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
// The constructor used for constructing this as a base class;
// ignores virtual bases.
CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
// The constructor used for constructing this as a complete class;
// constructs the virtual bases, then calls the base constructor.
if (!D->getParent()->isAbstract()) {
// We don't need to emit the complete ctor if the class is abstract.
CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
}
}
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
// All parameters are already in place except VTT, which goes after 'this'.
// These are Clang types, so we don't need to worry about sret yet.
// Check if we need to add a VTT parameter (which has type void **).
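// For example, for 'struct D : virtual B { D(); };' the base-object C2
// constructor variant is emitted as D(D *this, void **vtt, ...), while
// the complete-object C1 variant takes no VTT.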
if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
: GD.getDtorType() == Dtor_Base) &&
cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
ArgTys.insert(ArgTys.begin() + 1,
Context.getPointerType(Context.VoidPtrTy));
return AddedStructorArgCounts::prefix(1);
}
return AddedStructorArgCounts{};
}
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
// The destructor used for destructing this as a base class; ignores
// virtual bases.
CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
// The destructor used for destructing this as a most-derived class;
// calls the base destructor and then destructs any virtual bases.
CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
// The destructor in a virtual table is always a 'deleting'
// destructor, which calls the complete destructor and then uses the
// appropriate operator delete.
if (D->isVirtual())
CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}
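// For illustration, with a made-up class: for
//   struct B { virtual ~B(); };
// the variants emitted above are mangled as
//   _ZN1BD2Ev   (base-object destructor)
//   _ZN1BD1Ev   (complete-object destructor)
//   _ZN1BD0Ev   (deleting destructor)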
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
QualType &ResTy,
FunctionArgList &Params) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
// Check if we need a VTT parameter as well.
if (NeedsVTTParameter(CGF.CurGD)) {
ASTContext &Context = getContext();
// FIXME: avoid the fake decl
QualType T = Context.getPointerType(Context.VoidPtrTy);
auto *VTTDecl = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
T, ImplicitParamDecl::CXXVTT);
Params.insert(Params.begin() + 1, VTTDecl);
getStructorImplicitParamDecl(CGF) = VTTDecl;
}
}
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
// Naked functions have no prolog.
if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
return;
// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
// adjustments are required, because they are all handled by thunks.
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
// Initialize the 'vtt' slot if needed.
if (getStructorImplicitParamDecl(CGF)) {
getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
}
// If this is a function that the ABI specifies returns 'this', initialize
// the return slot to 'this' at the start of the function.
//
// Unlike the setting of return types, this is done within the ABI
// implementation instead of by clients of CGCXXABI because:
// 1) getThisValue is currently protected
// 2) in theory, an ABI could implement 'this' returns some other way;
//    HasThisReturn only specifies a contract, not the implementation
if (HasThisReturn(CGF.CurGD))
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating) {
if (!NeedsVTTParameter(GlobalDecl(D, Type)))
return AddedStructorArgs{};
// Insert the implicit 'vtt' argument as the second argument.
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
bool ForVirtualBase, bool Delegating) {
GlobalDecl GD(DD, Type);
return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This,
QualType ThisTy) {
GlobalDecl GD(DD, Type);
llvm::Value *VTT =
getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
CGCallee Callee;
if (getContext().getLangOpts().AppleKext &&
Type != Dtor_Base && DD->isVirtual())
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
else
Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) {
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
if (VTable->hasInitializer())
return;
ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
llvm::Constant *RTTI =
CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
auto components = builder.beginStruct();
CGVT.createVTableInitializer(components, VTLayout, RTTI,
llvm::GlobalValue::isLocalLinkage(Linkage));
components.finishAndSetAsInitializer(VTable);
// Set the correct linkage.
VTable->setLinkage(Linkage);
if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
// Set the right visibility.
CGM.setGVProperties(VTable, RD);
// If this is the magic class __cxxabiv1::__fundamental_type_info,
// we will emit the typeinfo for the fundamental types. This is the
// same behaviour as GCC.
const DeclContext *DC = RD->getDeclContext();
if (RD->getIdentifier() &&
RD->getIdentifier()->isStr("__fundamental_type_info") &&
isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
DC->getParent()->isTranslationUnit())
EmitFundamentalRTTIDescriptors(RD);
// Always emit type metadata on non-available_externally definitions, and on
// available_externally definitions if we are performing whole program
// devirtualization. For WPD we need the type metadata on all vtable
// definitions to ensure we associate derived classes with base classes
// defined in headers but with a strong definition only in a shared library.
if (!VTable->isDeclarationForLinker() ||
CGM.getCodeGenOpts().WholeProgramVTables) {
CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
// For available_externally definitions, add the vtable to
// @llvm.compiler.used so that it isn't deleted before whole program
// analysis.
if (VTable->isDeclarationForLinker()) {
assert(CGM.getCodeGenOpts().WholeProgramVTables);
CGM.addCompilerUsedGlobal(VTable);
}
}
if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
if (Vptr.NearestVBase == nullptr)
return false;
return NeedsVTTParameter(CGF.CurGD);
}
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
const CXXRecordDecl *NearestVBase) {
if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
NeedsVTTParameter(CGF.CurGD)) {
return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
NearestVBase);
}
return getVTableAddressPoint(Base, VTableClass);
}
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
const CXXRecordDecl *VTableClass) {
llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
// Find the appropriate vtable within the vtable group, and the address point
// within that vtable.
VTableLayout::AddressPointLocation AddressPoint =
CGM.getItaniumVTableContext()
.getVTableLayout(VTableClass)
.getAddressPoint(Base);
llvm::Value *Indices[] = {
llvm::ConstantInt::get(CGM.Int32Ty, 0),
llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
};
return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
Indices, /*InBounds=*/true,
/*InRangeIndex=*/1);
}
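// For illustration (the exact indices depend on the class): for a simple
// dynamic class A with three virtual functions, whose vtable group is a
// single { [5 x i8*] } global @_ZTV1A, the constant built above is roughly
//   getelementptr inbounds ({ [5 x i8*] }, { [5 x i8*] }* @_ZTV1A,
//                           i32 0, i32 0, i32 2)
// i.e. vtable 0, component 2: the slot just past offset-to-top and RTTI.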
// Check whether all the non-inline virtual methods for the class have the
// specified attribute.
template <typename T>
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
bool FoundNonInlineVirtualMethodWithAttr = false;
for (const auto *D : RD->noload_decls()) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
FD->doesThisDeclarationHaveABody())
continue;
if (!D->hasAttr<T>())
return false;
FoundNonInlineVirtualMethodWithAttr = true;
}
}
// We didn't find any non-inline virtual methods missing the attribute.
// Return true if we found at least one non-inline virtual method with the
// attribute. (This lets our caller know that the attribute needs to be
// propagated up to the vtable.)
return FoundNonInlineVirtualMethodWithAttr;
}
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
const CXXRecordDecl *NearestVBase) {
assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
// Get the secondary vpointer index.
uint64_t VirtualPointerIndex =
CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
// Load the VTT.
llvm::Value *VTT = CGF.LoadCXXVTT();
if (VirtualPointerIndex)
VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
CGF.VoidPtrTy, VTT, VirtualPointerIndex);
// And load the address point from the VTT.
return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
CGF.getPointerAlign());
}
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
BaseSubobject Base, const CXXRecordDecl *VTableClass) {
return getVTableAddressPoint(Base, VTableClass);
}
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
llvm::GlobalVariable *&VTable = VTables[RD];
if (VTable)
return VTable;
// Queue up this vtable for possible deferred emission.
CGM.addDeferredVTable(RD);
SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
getMangleContext().mangleCXXVTable(RD, Out);
const VTableLayout &VTLayout =
CGM.getItaniumVTableContext().getVTableLayout(RD);
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
// Use pointer alignment for the vtable. Otherwise we would align it based
// on the size of the initializer, which doesn't make sense, as only single
// values are read.
unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
? 32
: CGM.getTarget().getPointerAlign(0);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage,
getContext().toCharUnitsFromBits(PAlign).getQuantity());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// In MS C++, if you have a class with virtual functions that uses selective
// member import/export, then all non-inline virtual functions must be
// exported, or a link error will result. To match this behavior, for such
// classes, we dllimport the vtable if it is defined externally and all the
// non-inline virtual methods are marked dllimport, and we dllexport the
// vtable if it is defined in this TU and all the non-inline virtual methods
// are marked dllexport.
if (CGM.getTarget().hasPS4DLLImportExport()) {
if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
if (CGM.getVTables().isVTableExternal(RD)) {
if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
} else {
if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
}
}
CGM.setGVProperties(VTable, RD);
return VTable;
}
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
llvm::Type *TyPtr = Ty->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(
This, TyPtr->getPointerTo(), MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
MethodDecl->getParent(), VTable,
VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
llvm::Value *VFuncLoad;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
llvm::Value *Load = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
} else {
VTable =
CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
TyPtr, VTable, VTableIndex, "vfn");
VFuncLoad =
CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
CGF.getPointerAlign());
}
// Add !invariant.load metadata to the virtual function load to indicate
// that the function pointer didn't change inside the vtable.
// It's safe to add it without -fstrict-vtable-pointers, but it would not
// help in devirtualization, because it only matters when there are two
// loads of the same virtual function from the same vtable load, which
// won't happen without devirtualization enabled by
// -fstrict-vtable-pointers.
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
CGM.getCodeGenOpts().StrictVTablePointers) {
if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
VFuncLoadInstr->setMetadata(
llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGM.getLLVMContext(),
llvm::ArrayRef<llvm::Metadata *>()));
}
}
VFunc = VFuncLoad;
}
CGCallee Callee(GD, VFunc);
return Callee;
}
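// For illustration (a schematic of the non-relative path above, with
// made-up value names; the real pointer types come from the method's
// signature):
//   %vtable = load <fnptr>**, <fnptr>*** %this        ; read the vptr
//   %vfn    = getelementptr inbounds <fnptr>*, <fnptr>** %vtable,
//             i64 <VTableIndex>
//   %fp     = load <fnptr>*, <fnptr>** %vfn           ; the entry itself
// and %fp is then called with 'this' as the first argument.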
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
Address This, DeleteOrMemberCallExpr E) {
auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
auto *D = E.dyn_cast<const CXXDeleteExpr *>();
assert((CE != nullptr) ^ (D != nullptr));
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
GlobalDecl GD(Dtor, DtorType);
const CGFunctionInfo *FInfo =
&CGM.getTypes().arrangeCXXStructorDeclaration(GD);
llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
QualType ThisTy;
if (CE) {
ThisTy = CE->getObjectType();
} else {
ThisTy = D->getDestroyedType();
}
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
QualType(), nullptr);
return nullptr;
}
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
CodeGenVTables &VTables = CGM.getVTables();
llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
const CXXRecordDecl *RD) const {
// We don't emit available_externally vtables if we are in -fapple-kext mode
// because kext mode does not permit devirtualization.
if (CGM.getLangOpts().AppleKext)
return false;
// If the vtable is hidden then it is not safe to emit an
// available_externally copy of the vtable.
if (isVTableHidden(RD))
return false;
if (CGM.getCodeGenOpts().ForceEmitVTables)
return true;
// If there are no inline virtual functions left unemitted, then we are
// safe to emit an available_externally copy of the vtable.
// FIXME: we could still emit a copy of the vtable if we can emit
// definitions of the inline functions.
if (hasAnyUnusedVirtualInlineFunction(RD))
return false;
// For a class with virtual bases, we must also be able to speculatively
// emit the VTT, because CodeGen doesn't have separate notions of "can emit
// the vtable" and "can emit the VTT". For a base subobject, this means we
// need to be able to emit non-virtual base vtables.
if (RD->getNumVBases()) {
for (const auto &B : RD->bases()) {
auto *BRD = B.getType()->getAsCXXRecordDecl();
assert(BRD && "no class for base specifier");
if (B.isVirtual() || !BRD->isDynamicClass())
continue;
if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
return false;
}
}
return true;
}
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
if (!canSpeculativelyEmitVTableAsBaseClass(RD))
return false;
// For a complete-object vtable (or more specifically, for the VTT), we need
// to be able to speculatively emit the vtables of all dynamic virtual bases.
for (const auto &B : RD->vbases()) {
auto *BRD = B.getType()->getAsCXXRecordDecl();
assert(BRD && "no class for base specifier");
if (!BRD->isDynamicClass())
continue;
if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
return false;
}
return true;
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
int64_t NonVirtualAdjustment,
int64_t VirtualAdjustment,
bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
return InitialPtr.getPointer();
Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
// In a base-to-derived cast, the non-virtual adjustment is applied first.
if (NonVirtualAdjustment && !IsReturnAdjustment) {
V = CGF.Builder.CreateConstInBoundsByteGEP(V,
CharUnits::fromQuantity(NonVirtualAdjustment));
}
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *Offset;
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
CGF.Int8Ty, VTablePtr, VirtualAdjustment);
if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the adjustment offset from the vtable as a 32-bit int.
OffsetPtr =
CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
Offset =
CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
CharUnits::fromQuantity(4));
} else {
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
OffsetPtr =
CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
// Load the adjustment offset from the vtable.
Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
CGF.getPointerAlign());
}
// Adjust our pointer.
ResultPtr = CGF.Builder.CreateInBoundsGEP(
V.getElementType(), V.getPointer(), Offset);
} else {
ResultPtr = V.getPointer();
}
// In a derived-to-base conversion, the non-virtual adjustment is
// applied second.
if (NonVirtualAdjustment && IsReturnAdjustment) {
ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
NonVirtualAdjustment);
}
// Cast back to the original type.
return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
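// For illustration (pseudo-code for the two orderings implemented above):
//   this-adjustment (IsReturnAdjustment == false):
//     p = this + NonVirtualAdjustment;
//     if (VirtualAdjustment)
//       p += *(ptrdiff_t *)(*(char **)p + VirtualAdjustment);
//   return-adjustment (IsReturnAdjustment == true):
//     p = ret;
//     if (VirtualAdjustment)
//       p += *(ptrdiff_t *)(*(char **)p + VirtualAdjustment);
//     p += NonVirtualAdjustment;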
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
Address This,
const ThisAdjustment &TA) {
return performTypeAdjustment(CGF, This, TA.NonVirtual,
TA.Virtual.Itanium.VCallOffsetOffset,
/*IsReturnAdjustment=*/false);
}
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
RA.Virtual.Itanium.VBaseOffsetOffset,
/*IsReturnAdjustment=*/true);
}
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
// Destructor thunks in the ARM ABI have indeterminate results.
llvm::Type *T = CGF.ReturnValue.getElementType();
RValue Undef = RValue::get(llvm::UndefValue::get(T));
return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}
/************************** Array allocation cookies **************************/
CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// The array cookie is a size_t; pad that up to the element alignment.
// The cookie is actually right-justified in that space.
return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
CGM.getContext().getPreferredTypeAlignInChars(elementType));
}
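// For illustration (a typical 64-bit target; the sizes are assumptions):
// for 'new T[n]' with sizeof(size_t) == 8 and a preferred alignment of 16
// for T, the cookie occupies 16 bytes, n is stored in its last 8 bytes,
// and the array data starts immediately after the cookie.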
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
assert(requiresArrayCookie(expr));
unsigned AS = NewPtr.getAddressSpace();
ASTContext &Ctx = getContext();
CharUnits SizeSize = CGF.getSizeSize();
// The size of the cookie.
CharUnits CookieSize =
std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
Address CookiePtr = NewPtr;
CharUnits CookieOffset = CookieSize - SizeSize;
if (!CookieOffset.isZero())
CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
// Write the number of elements into the appropriate slot.
Address NumElementsPtr =
CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
// Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
(expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
}
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) {
// The number of elements is right-justified in the cookie.
Address numElementsPtr = allocPtr;
CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
if (!numElementsOffset.isZero())
numElementsPtr =
CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
unsigned AS = allocPtr.getAddressSpace();
numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
return CGF.Builder.CreateLoad(numElementsPtr);
// In ASan mode, emit a function call instead of a regular load and let the
// run-time deal with it: if the shadow is properly poisoned, return the
// cookie; otherwise return 0 to avoid an infinite loop calling destructors.
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// ARM says that the cookie is always:
// struct array_cookie {
// std::size_t element_size; // element_size != 0
// std::size_t element_count;
// };
// But the base ABI doesn't give anything an alignment greater than
// 8, so we can dismiss this as typical ABI-author blindness to
// actual language complexity and round up to the element alignment.
return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
CGM.getContext().getTypeAlignInChars(elementType));
}
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address newPtr,
llvm::Value *numElements,
const CXXNewExpr *expr,
QualType elementType) {
assert(requiresArrayCookie(expr));
// The cookie is always at the start of the buffer.
Address cookie = newPtr;
// The first element is the element size.
cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
getContext().getTypeSizeInChars(elementType).getQuantity());
CGF.Builder.CreateStore(elementSize, cookie);
// The second element is the element count.
cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
CGF.Builder.CreateStore(numElements, cookie);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) {
// The number of elements is at offset sizeof(size_t) relative to
// the allocated pointer.
Address numElementsPtr
= CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
/*********************** Static local initialization **************************/
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "__cxa_guard_acquire",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind));
}
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "__cxa_guard_release",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind));
}
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "__cxa_guard_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind));
}
namespace {
struct CallGuardAbort final : EHScopeStack::Cleanup {
llvm::GlobalVariable *Guard;
CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
Guard);
}
};
}
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
const VarDecl &D,
llvm::GlobalVariable *var,
bool shouldPerformInit) {
CGBuilderTy &Builder = CGF.Builder;
// Inline variables that weren't instantiated from variable templates have
// partially-ordered initialization within their translation unit.
bool NonTemplateInline =
D.isInline() &&
!isTemplateInstantiation(D.getTemplateSpecializationKind());
// We only need to use thread-safe statics for local non-TLS variables and
// inline variables; other global initialization is always single-threaded
// or (through lazy dynamic loading in multiple threads) unsequenced.
bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
(D.isLocalVarDecl() || NonTemplateInline) &&
!D.getTLSKind();
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
llvm::IntegerType *guardTy;
CharUnits guardAlignment;
if (useInt8GuardVariable) {
guardTy = CGF.Int8Ty;
guardAlignment = CharUnits::One();
} else {
// Guard variables are 64 bits in the generic ABI and the width of size_t
// on ARM (i.e. 32 bits on AArch32, 64 bits on AArch64).
if (UseARMGuardVarABI) {
guardTy = CGF.SizeTy;
guardAlignment = CGF.getSizeAlign();
} else {
guardTy = CGF.Int64Ty;
guardAlignment = CharUnits::fromQuantity(
CGM.getDataLayout().getABITypeAlignment(guardTy));
}
}
llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
// Create the guard variable if we don't already have it (as we
// might if we're double-emitting this function body).
llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
if (!guard) {
// Mangle the name for the guard.
SmallString<256> guardName;
{
llvm::raw_svector_ostream out(guardName);
getMangleContext().mangleStaticGuardVariable(&D, out);
}
// Create the guard variable with a zero-initializer.
// Just absorb linkage and visibility from the guarded variable.
guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
false, var->getLinkage(),
llvm::ConstantInt::get(guardTy, 0),
guardName.str());
guard->setDSOLocal(var->isDSOLocal());
guard->setVisibility(var->getVisibility());
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
guard->setAlignment(guardAlignment.getAsAlign());
// The ABI says: "It is suggested that it be emitted in the same COMDAT
// group as the associated data object." In practice, this doesn't work for
// non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
llvm::Comdat *C = var->getComdat();
if (!D.isLocalVarDecl() && C &&
(CGM.getTarget().getTriple().isOSBinFormatELF() ||
CGM.getTarget().getTriple().isOSBinFormatWasm())) {
guard->setComdat(C);
} else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
}
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
Address guardAddr = Address(guard, guardAlignment);
// Test whether the variable has completed initialization.
//
// Itanium C++ ABI 3.3.2:
// The following is pseudo-code showing how these functions can be used:
// if (obj_guard.first_byte == 0) {
// if ( __cxa_guard_acquire (&obj_guard) ) {
// try {
// ... initialize the object ...;
// } catch (...) {
// __cxa_guard_abort (&obj_guard);
// throw;
// }
// ... queue object destructor with __cxa_atexit() ...;
// __cxa_guard_release (&obj_guard);
// }
// }
// Load the first byte of the guard variable.
llvm::LoadInst *LI =
Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
// Itanium ABI:
// An implementation supporting thread-safety on multiprocessor
// systems must also guarantee that references to the initialized
// object do not occur before the load of the initialization flag.
//
// In LLVM, we do this by marking the load Acquire.
if (threadsafe)
LI->setAtomic(llvm::AtomicOrdering::Acquire);
// For ARM, we should only check the first bit, rather than the entire byte:
//
// ARM C++ ABI 3.2.3.1:
// To support the potential use of initialization guard variables
// as semaphores that are the target of ARM SWP and LDREX/STREX
// synchronizing instructions we define a static initialization
// guard variable to be a 4-byte aligned, 4-byte word with the
// following inline access protocol.
// #define INITIALIZED 1
// if ((obj_guard & INITIALIZED) != INITIALIZED) {
// if (__cxa_guard_acquire(&obj_guard))
// ...
// }
//
// and similarly for ARM64:
//
// ARM64 C++ ABI 3.2.2:
// This ABI instead only specifies the value bit 0 of the static guard
// variable; all other bits are platform defined. Bit 0 shall be 0 when the
// variable is not initialized and 1 when it is.
llvm::Value *V =
(UseARMGuardVarABI && !useInt8GuardVariable)
? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
: LI;
llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
// Check if the first byte of the guard variable is zero.
CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
CodeGenFunction::GuardKind::VariableGuard, &D);
CGF.EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
if (threadsafe) {
// Call __cxa_guard_acquire.
llvm::Value *V
= CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
InitBlock, EndBlock);
// Call __cxa_guard_abort along the exceptional edge.
CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
CGF.EmitBlock(InitBlock);
}
// Emit the initializer and add a global destructor if appropriate.
CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
if (threadsafe) {
// Pop the guard-abort cleanup if we pushed one.
CGF.PopCleanupBlock();
// Call __cxa_guard_release. This cannot throw.
CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
guardAddr.getPointer());
} else {
// Store 1 into the first byte of the guard variable after initialization is
// complete.
Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
}
CGF.EmitBlock(EndBlock);
}
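// For illustration (a schematic of the thread-safe control flow emitted
// above, with made-up value names):
//   %g = load atomic i8, i8* @guard acquire    ; first byte of the guard
//   br i1 <%g == 0>, label %init.check, label %init.end
// init.check:
//   %acquired = call i32 @__cxa_guard_acquire(i64* @guard)
//   br i1 <%acquired != 0>, label %init, label %init.end
// init:
//   ; ...run the initializer, with a __cxa_guard_abort cleanup pushed...
//   call void @__cxa_guard_release(i64* @guard)
//   br label %init.end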
/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
llvm::FunctionCallee dtor,
llvm::Constant *addr, bool TLS) {
assert(!CGF.getTarget().getTriple().isOSAIX() &&
"unexpected call to emitGlobalDtorWithCXAAtExit");
assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
"__cxa_atexit is disabled");
const char *Name = "__cxa_atexit";
if (TLS) {
const llvm::Triple &T = CGF.getTarget().getTriple();
Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
}
// We're assuming that the destructor function is something we can
// reasonably call with the default CC. Go ahead and cast it to the
// right prototype.
llvm::Type *dtorTy =
llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
auto AddrInt8PtrTy =
AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
// Create a variable that binds the atexit to this shared object.
llvm::Constant *handle =
CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
// extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(CGF.IntTy, paramTys, false);
// Fetch the actual function.
llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
fn->setDoesNotThrow();
if (!addr)
// addr is null when we are trying to register a dtor annotated with
// __attribute__((destructor)) in a constructor function. Using null here is
// okay because this argument is just passed back to the destructor
// function.
addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
cast<llvm::Constant>(dtor.getCallee()), dtorTy),
llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
handle};
CGF.EmitNounwindRuntimeCall(atexit, args);
}
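// For illustration (a sketch in source terms, with a made-up global): for
//   A a;
// the call emitted above is morally
//   __cxa_atexit(<A's destructor, cast to void (*)(void *)>, &a,
//                &__dso_handle);
// and the runtime runs the registered destructors in reverse order of
// registration at exit or library unload.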
static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
StringRef FnName) {
// Create a function that registers/unregisters destructors that have the same
// priority.
llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
return GlobalInitOrCleanupFn;
}
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
for (const auto &I : DtorsUsingAtExit) {
int Priority = I.first;
std::string GlobalCleanupFnName =
std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
llvm::Function *GlobalCleanupFn =
createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
CodeGenFunction CGF(*this);
CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
getTypes().arrangeNullaryFunction(), FunctionArgList(),
SourceLocation(), SourceLocation());
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
// Get the destructor function type, void(*)(void).
llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
// Destructor functions are run/unregistered in non-ascending
// order of their priorities.
const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
auto itv = Dtors.rbegin();
while (itv != Dtors.rend()) {
llvm::Function *Dtor = *itv;
// We're assuming that the destructor function is something we can
// reasonably call with the correct CC. Go ahead and cast it to the
// right prototype.
llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
llvm::Value *NeedsDestruct =
CGF.Builder.CreateIsNull(V, "needs_destruct");
llvm::BasicBlock *