//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-module state used while generating code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGOpenMPRuntimeAMDGCN.h"
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace CodeGen;
static llvm::cl::opt<bool> LimitedCoverage(
"limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
llvm::cl::init(false));
static const char AnnotationSection[] = "llvm.metadata";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getTarget().getCXXABI().getKind()) {
case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::WatchOS:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
case TargetCXXABI::XL:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
}
llvm_unreachable("invalid C++ ABI kind");
}
CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO,
const CodeGenOptions &CGO, llvm::Module &M,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
: Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
VMContext(M.getContext()), Types(*this), VTables(*this),
SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
HalfTy = llvm::Type::getHalfTy(LLVMContext);
BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
SizeSizeInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
IntAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
CharTy =
llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
Int8PtrTy = Int8Ty->getPointerTo(0);
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
AllocaInt8PtrTy = Int8Ty->getPointerTo(
M.getDataLayout().getAllocaAddrSpace());
ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
if (LangOpts.ObjC)
createObjCRuntime();
if (LangOpts.OpenCL)
createOpenCLRuntime();
if (LangOpts.OpenMP)
createOpenMPRuntime();
if (LangOpts.CUDA)
createCUDARuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
getCXXABI().getMangleContext()));
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
DebugInfo.reset(new CGDebugInfo(*this));
Block.GlobalUniqueCount = 0;
if (C.getLangOpts().ObjC)
ObjCData.reset(new ObjCEntrypoints());
if (CodeGenOpts.hasProfileClangUse()) {
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
if (auto E = ReaderOrErr.takeError()) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Could not read profile %0: %1");
llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
<< EI.message();
});
} else
PGOReader = std::move(ReaderOrErr.get());
}
// If coverage mapping generation is enabled, create the
// CoverageMappingModuleGen object.
if (CodeGenOpts.CoverageMapping)
CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
// Generate the module name hash here if needed.
if (CodeGenOpts.UniqueInternalLinkageNames &&
!getModule().getSourceFileName().empty()) {
std::string Path = getModule().getSourceFileName();
// Check if a path substitution is needed from the MacroPrefixMap.
for (const auto &Entry : PPO.MacroPrefixMap)
if (Path.rfind(Entry.first, 0) != std::string::npos) {
Path = Entry.second + Path.substr(Entry.first.size());
break;
}
llvm::MD5 Md5;
Md5.update(Path);
llvm::MD5::MD5Result R;
Md5.final(R);
SmallString<32> Str;
llvm::MD5::stringifyResult(R, Str);
// Convert the MD5 hash to decimal. Demangler suffixes can contain either
// numbers or characters but not both.
llvm::APInt IntHash(128, Str.str(), 16);
// Prepend "__uniq" before the hash for tools like profilers to understand
// that this symbol is of internal linkage type. The "__uniq" is the
// pre-determined prefix that is used to tell tools that this symbol was
// created with -funique-internal-linakge-symbols and the tools can strip or
// keep the prefix as needed.
ModuleNameHash = (Twine(".__uniq.") +
Twine(IntHash.toString(/* Radix = */ 10, /* Signed = */false))).str();
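// For example (illustrative values only), if the remapped source path hashes
// to the decimal value 123456789, an internal-linkage function _ZL3foov in
// this module is later emitted as _ZL3foov.__uniq.123456789.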
}
}
CodeGenModule::~CodeGenModule() {}
void CodeGenModule::createObjCRuntime() {
// This is just isGNUFamily(), but we want to force implementors of
// new ABIs to decide how best to do this.
switch (LangOpts.ObjCRuntime.getKind()) {
case ObjCRuntime::GNUstep:
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
return;
case ObjCRuntime::FragileMacOSX:
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
case ObjCRuntime::WatchOS:
ObjCRuntime.reset(CreateMacObjCRuntime(*this));
return;
}
llvm_unreachable("bad runtime kind");
}
void CodeGenModule::createOpenCLRuntime() {
OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
}
void CodeGenModule::createOpenMPRuntime() {
// Select a specialized code generation class based on the target, if any.
// If one does not exist, use the default implementation.
switch (getTriple().getArch()) {
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
assert(getLangOpts().OpenMPIsDevice &&
"OpenMP NVPTX is only prepared to deal with device code.");
OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
break;
case llvm::Triple::amdgcn:
assert(getLangOpts().OpenMPIsDevice &&
"OpenMP AMDGCN is only prepared to deal with device code.");
OpenMPRuntime.reset(new CGOpenMPRuntimeAMDGCN(*this));
break;
default:
if (LangOpts.OpenMPSimd)
OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
else
OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
break;
}
}
void CodeGenModule::createCUDARuntime() {
CUDARuntime.reset(CreateNVCUDARuntime(*this));
}
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
Replacements[Name] = C;
}
void CodeGenModule::applyReplacements() {
for (auto &I : Replacements) {
StringRef MangledName = I.first();
llvm::Constant *Replacement = I.second;
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (!Entry)
continue;
auto *OldF = cast<llvm::Function>(Entry);
auto *NewF = dyn_cast<llvm::Function>(Replacement);
if (!NewF) {
if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
} else {
auto *CE = cast<llvm::ConstantExpr>(Replacement);
assert(CE->getOpcode() == llvm::Instruction::BitCast ||
CE->getOpcode() == llvm::Instruction::GetElementPtr);
NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
}
}
// Replace old with new, but keep the old order.
OldF->replaceAllUsesWith(Replacement);
if (NewF) {
NewF->removeFromParent();
OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
NewF);
}
OldF->eraseFromParent();
}
}
void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
GlobalValReplacements.push_back(std::make_pair(GV, C));
}
void CodeGenModule::applyGlobalValReplacements() {
for (auto &I : GlobalValReplacements) {
llvm::GlobalValue *GV = I.first;
llvm::Constant *C = I.second;
GV->replaceAllUsesWith(C);
GV->eraseFromParent();
}
}
// This is only used in aliases that we created and we know they have a
// linear structure.
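// For example (illustrative IR), with
//   @impl = global i32 0
//   @b = alias i32, i32* @impl
//   @a = alias i32, i32* @b
// walking @a yields the GlobalObject @impl, while a cyclic alias chain yields
// null.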
static const llvm::GlobalObject *getAliasedGlobal(
const llvm::GlobalIndirectSymbol &GIS) {
llvm::SmallPtrSet<const llvm::GlobalIndirectSymbol*, 4> Visited;
const llvm::Constant *C = &GIS;
for (;;) {
C = C->stripPointerCasts();
if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
return GO;
// stripPointerCasts will not walk over weak aliases.
auto *GIS2 = dyn_cast<llvm::GlobalIndirectSymbol>(C);
if (!GIS2)
return nullptr;
if (!Visited.insert(GIS2).second)
return nullptr;
C = GIS2->getIndirectSymbol();
}
}
void CodeGenModule::checkAliases() {
// Check if the constructed aliases are well formed. It is really unfortunate
// that we have to do this in CodeGen, but we only construct mangled names
// and aliases during codegen.
bool Error = false;
DiagnosticsEngine &Diags = getDiags();
for (const GlobalDecl &GD : Aliases) {
const auto *D = cast<ValueDecl>(GD.getDecl());
SourceLocation Location;
bool IsIFunc = D->hasAttr<IFuncAttr>();
if (const Attr *A = D->getDefiningAttr())
Location = A->getLocation();
else
llvm_unreachable("Not an alias or ifunc?");
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
if (!GV) {
Error = true;
Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
} else if (GV->isDeclaration()) {
Error = true;
Diags.Report(Location, diag::err_alias_to_undefined)
<< IsIFunc << IsIFunc;
} else if (IsIFunc) {
// Check resolver function type.
llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(
GV->getType()->getPointerElementType());
assert(FTy);
if (!FTy->getReturnType()->isPointerTy())
Diags.Report(Location, diag::err_ifunc_resolver_return);
}
llvm::Constant *Aliasee = Alias->getIndirectSymbol();
llvm::GlobalValue *AliaseeGV;
if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
else
AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
StringRef AliasSection = SA->getName();
if (AliasSection != AliaseeGV->getSection())
Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
<< AliasSection << IsIFunc << IsIFunc;
}
// We have to handle aliases to weak aliases in here. LLVM itself disallows
// this since the object semantics would not match the IR semantics. For
// compatibility with gcc we implement it by just pointing the alias
// to its aliasee's aliasee. We also warn, since the user is probably
// expecting the link to be weak.
if (auto GA = dyn_cast<llvm::GlobalIndirectSymbol>(AliaseeGV)) {
if (GA->isInterposable()) {
Diags.Report(Location, diag::warn_alias_to_weak_alias)
<< GV->getName() << GA->getName() << IsIFunc;
Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
GA->getIndirectSymbol(), Alias->getType());
Alias->setIndirectSymbol(Aliasee);
}
}
}
if (!Error)
return;
for (const GlobalDecl &GD : Aliases) {
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
Alias->eraseFromParent();
}
}
void CodeGenModule::clear() {
DeferredDeclsToEmit.clear();
if (OpenMPRuntime)
OpenMPRuntime->clear();
}
void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
StringRef MainFile) {
if (!hasDiagnostics())
return;
if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
if (MainFile.empty())
MainFile = "<stdin>";
Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
} else {
if (Mismatched > 0)
Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
if (Missing > 0)
Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
}
}
static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
llvm::Module &M) {
if (!LO.VisibilityFromDLLStorageClass)
return;
llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
CodeGenModule::GetLLVMVisibility(
LO.getExternDeclNoDLLStorageClassVisibility());
for (llvm::GlobalValue &GV : M.global_values()) {
if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
continue;
// Reset DSO locality before setting the visibility. This removes
// any effects that visibility options and annotations may have
// had on the DSO locality. Setting the visibility will implicitly set
// appropriate globals to DSO Local; however, this will be pessimistic
// w.r.t. the normal compiler IRGen.
GV.setDSOLocal(false);
if (GV.isDeclarationForLinker()) {
GV.setVisibility(GV.getDLLStorageClass() ==
llvm::GlobalValue::DLLImportStorageClass
? ExternDeclDLLImportVisibility
: ExternDeclNoDLLStorageClassVisibility);
} else {
GV.setVisibility(GV.getDLLStorageClass() ==
llvm::GlobalValue::DLLExportStorageClass
? DLLExportVisibility
: NoDLLStorageClassVisibility);
}
GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
}
}
void CodeGenModule::Release() {
EmitDeferred();
EmitVTablesOpportunistically();
applyGlobalValReplacements();
applyReplacements();
checkAliases();
emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
EmitCXXGlobalCleanUpFunc();
registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
if (Context.getLangOpts().CUDA && CUDARuntime) {
if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
AddGlobalCtor(CudaCtorFunction);
}
if (OpenMPRuntime) {
if (llvm::Function *OpenMPRequiresDirectiveRegFun =
OpenMPRuntime->emitRequiresDirectiveRegFun()) {
AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
}
OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
OpenMPRuntime->clear();
}
if (PGOReader) {
getModule().setProfileSummary(
PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
llvm::ProfileSummary::PSK_Instr);
if (PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
}
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
EmitStaticExternCAliases();
EmitDeferredUnusedCoverageMappings();
if (CoverageMapping)
CoverageMapping->emit();
if (CodeGenOpts.SanitizeCfiCrossDso) {
CodeGenFunction(*this).EmitCfiCheckFail();
CodeGenFunction(*this).EmitCfiCheckStub();
}
emitAtAvailableLinkGuard();
if (Context.getTargetInfo().getTriple().isWasm() &&
!Context.getTargetInfo().getTriple().isOSEmscripten()) {
EmitMainVoidAlias();
}
emitLLVMUsed();
if (SanStats)
SanStats->finish();
if (CodeGenOpts.Autolink &&
(Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
EmitModuleLinkOptions();
}
// On ELF we pass the dependent library specifiers directly to the linker
// without manipulating them. This is in contrast to other platforms where
// they are mapped to a specific linker option by the compiler. This
// difference is a result of the greater variety of ELF linkers and the fact
// that ELF linkers tend to handle libraries in a more complicated fashion
// than on other platforms. This forces us to defer handling the dependent
// libs to the linker.
//
// CUDA/HIP device and host libraries are different. Currently there is no
// way to differentiate dependent libraries for host or device. Existing
// usage of #pragma comment(lib, *) is intended for host libraries on
// Windows. Therefore emit llvm.dependent-libraries only for host.
if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
for (auto *MD : ELFDependentLibraries)
NMD->addOperand(MD);
}
// Record mregparm value now so it is visible through rest of codegen.
if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
CodeGenOpts.NumRegisterParameters);
if (CodeGenOpts.DwarfVersion) {
getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
CodeGenOpts.DwarfVersion);
}
if (CodeGenOpts.Dwarf64)
getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
getModule().setSemanticInterposition(1);
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
}
if (CodeGenOpts.CodeViewGHash) {
getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
}
if (CodeGenOpts.ControlFlowGuard) {
// Function ID tables and checks for Control Flow Guard (cfguard=2).
getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
} else if (CodeGenOpts.ControlFlowGuardNoChecks) {
// Function ID tables for Control Flow Guard (cfguard=1).
getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
}
if (CodeGenOpts.EHContGuard) {
// Function ID tables for EH Continuation Guard.
getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
}
if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
// We don't support LTO of two modules built with different
// StrictVTablePointers settings.
// FIXME: we could support it by stripping all the information introduced
// by StrictVTablePointers.
getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);
llvm::Metadata *Ops[2] = {
llvm::MDString::get(VMContext, "StrictVTablePointers"),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
llvm::Type::getInt32Ty(VMContext), 1))};
getModule().addModuleFlag(llvm::Module::Require,
"StrictVTablePointersRequirement",
llvm::MDNode::get(VMContext, Ops));
}
if (getModuleDebugInfo())
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
// (and warn about it, too).
getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
llvm::DEBUG_METADATA_VERSION);
// We need to record the widths of enums and wchar_t, so that we can generate
// the correct build attributes in the ARM backend. wchar_size is also used by
// TargetLibraryInfo.
uint64_t WCharWidth =
Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
if ( Arch == llvm::Triple::arm
|| Arch == llvm::Triple::armeb
|| Arch == llvm::Triple::thumb
|| Arch == llvm::Triple::thumbeb) {
// The minimum width of an enum in bytes
uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
StringRef ABIStr = Target.getABI();
llvm::LLVMContext &Ctx = TheModule.getContext();
getModule().addModuleFlag(llvm::Module::Error, "target-abi",
llvm::MDString::get(Ctx, ABIStr));
}
if (CodeGenOpts.SanitizeCfiCrossDso) {
// Indicate that we want cross-DSO control flow integrity checks.
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
if (CodeGenOpts.WholeProgramVTables) {
// Indicate whether VFE was enabled for this module, so that the
// vcall_visibility metadata added under whole program vtables is handled
// appropriately in the optimizer.
getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
CodeGenOpts.VirtualFunctionElimination);
}
if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
getModule().addModuleFlag(llvm::Module::Override,
"CFI Canonical Jump Tables",
CodeGenOpts.SanitizeCfiCanonicalJumpTables);
}
if (CodeGenOpts.CFProtectionReturn &&
Target.checkCFProtectionReturnSupported(getDiags())) {
// Indicate that we want to instrument return control flow protection.
getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
1);
}
if (CodeGenOpts.CFProtectionBranch &&
Target.checkCFProtectionBranchSupported(getDiags())) {
// Indicate that we want to instrument branch control flow protection.
getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
1);
}
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
getModule().addModuleFlag(llvm::Module::Error,
"branch-target-enforcement",
LangOpts.BranchTargetEnforcement);
getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
LangOpts.hasSignReturnAddress());
getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
LangOpts.isSignReturnAddressScopeAll());
getModule().addModuleFlag(llvm::Module::Error,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
}
if (!CodeGenOpts.MemoryProfileOutput.empty()) {
llvm::LLVMContext &Ctx = TheModule.getContext();
getModule().addModuleFlag(
llvm::Module::Error, "MemProfProfileFilename",
llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
}
if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
// Indicate whether __nvvm_reflect should be configured to flush denormal
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
CodeGenOpts.FP32DenormalMode.Output !=
llvm::DenormalMode::IEEE);
}
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
if (LangOpts.OpenCL) {
EmitOpenCLMetadata();
// Emit SPIR version.
if (getTriple().isSPIR()) {
// SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
// opencl.spir.version named metadata.
// C++ is backwards compatible with OpenCL v2.0.
auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *SPIRVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, (Version / 100 > 1) ? 0 : 2))};
llvm::NamedMDNode *SPIRVerMD =
TheModule.getOrInsertNamedMetadata("opencl.spir.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
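// For example, for OpenCL 2.0 or C++ for OpenCL (Version == 200) this
// produces (node numbering illustrative):
//   !opencl.spir.version = !{!0}
//   !0 = !{i32 2, i32 0}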
}
}
if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
assert(PLevel < 3 && "Invalid PIC Level");
getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
if (Context.getLangOpts().PIE)
getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
}
if (getCodeGenOpts().CodeModel.size() > 0) {
unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
.Case("tiny", llvm::CodeModel::Tiny)
.Case("small", llvm::CodeModel::Small)
.Case("kernel", llvm::CodeModel::Kernel)
.Case("medium", llvm::CodeModel::Medium)
.Case("large", llvm::CodeModel::Large)
.Default(~0u);
if (CM != ~0u) {
llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
getModule().setCodeModel(codeModel);
}
}
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
SimplifyPersonality();
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->finalize();
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
if (!getCodeGenOpts().RecordCommandLine.empty())
EmitCommandLineMetadata();
getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
EmitBackendOptionsMetadata(getCodeGenOpts());
// Set visibility from DLL storage class
// We do this at the end of LLVM IR generation; after any operation
// that might affect the DLL storage class or the visibility, and
// before anything that might act on these.
setVisibilityFromDLLStorageClass(LangOpts, getModule());
}
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
// C++ is backwards compatible with OpenCL v2.0.
// FIXME: We might need to add CXX version at some point too?
auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *OCLVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, (Version % 100) / 10))};
llvm::NamedMDNode *OCLVerMD =
TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
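// For example, compiling with -cl-std=CL1.2 (Version == 120) produces (node
// numbering illustrative):
//   !opencl.ocl.version = !{!0}
//   !0 = !{i32 1, i32 2}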
}
void CodeGenModule::EmitBackendOptionsMetadata(
const CodeGenOptions CodeGenOpts) {
switch (getTriple().getArch()) {
default:
break;
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
CodeGenOpts.SmallDataLimit);
break;
}
}
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
}
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
// Make sure that this type is translated.
Types.RefreshTypeCacheForClass(RD);
}
llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getTypeInfo(QTy);
}
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
if (!TBAA)
return TBAAAccessInfo();
if (getLangOpts().CUDAIsDevice) {
// As CUDA builtin surface/texture types are replaced, skip generating TBAA
// access info.
if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
nullptr)
return TBAAAccessInfo();
} else if (AccessType->isCUDADeviceBuiltinTextureType()) {
if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
nullptr)
return TBAAAccessInfo();
}
}
return TBAA->getAccessInfo(AccessType);
}
TBAAAccessInfo
CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
if (!TBAA)
return TBAAAccessInfo();
return TBAA->getVTablePtrAccessInfo(VTablePtrType);
}
llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getTBAAStructInfo(QTy);
}
llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getBaseTypeInfo(QTy);
}
llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
if (!TBAA)
return nullptr;
return TBAA->getAccessTagInfo(Info);
}
TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
TBAAAccessInfo TargetInfo) {
if (!TBAA)
return TBAAAccessInfo();
return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
}
TBAAAccessInfo
CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
TBAAAccessInfo InfoB) {
if (!TBAA)
return TBAAAccessInfo();
return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
}
TBAAAccessInfo
CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
TBAAAccessInfo SrcInfo) {
if (!TBAA)
return TBAAAccessInfo();
return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
}
void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
TBAAAccessInfo TBAAInfo) {
if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
}
void CodeGenModule::DecorateInstructionWithInvariantGroup(
llvm::Instruction *I, const CXXRecordDecl *RD) {
I->setMetadata(llvm::LLVMContext::MD_invariant_group,
llvm::MDNode::get(getLLVMContext(), {}));
}
void CodeGenModule::Error(SourceLocation loc, StringRef message) {
unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
getDiags().Report(Context.getFullLoc(loc), diagID) << message;
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
<< Msg << S->getSourceRange();
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified decl yet.
void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
return llvm::ConstantInt::get(SizeTy, size.getQuantity());
}
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const NamedDecl *D) const {
if (GV->hasDLLImportStorageClass())
return;
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
return;
}
if (!D)
return;
// Set visibility for definitions, and for declarations if requested globally
// or set explicitly.
LinkageInfo LV = D->getLinkageAndVisibility();
if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
!GV->isDeclarationForLinker())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
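// Decides whether a global can be assumed dso_local, i.e. resolvable to a
// definition within the same linkage unit. For example (illustrative), with
// -fno-pic a non-thread-local "extern int x;" is marked dso_local so it can
// be accessed directly; if it ends up defined in a shared library, the link
// editor inserts a copy relocation.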
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
llvm::GlobalValue *GV) {
if (GV->hasLocalLinkage())
return true;
if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
return true;
// DLLImport explicitly marks the GV as external.
if (GV->hasDLLImportStorageClass())
return false;
const llvm::Triple &TT = CGM.getTriple();
if (TT.isWindowsGNUEnvironment()) {
// In MinGW, variables without DLLImport can still be automatically
// imported from a DLL by the linker; don't mark variables that
// potentially could come from another DLL as DSO local.
if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
!GV->isThreadLocal())
return false;
}
// On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
// remain unresolved in the link, they can be resolved to zero, which is
// outside the current DSO.
if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
return false;
// Every other GV is local on COFF.
// Make an exception for windows OS in the triple: Some firmware builds use
// *-win32-macho triples. This (accidentally?) produced windows relocations
// without GOT tables in older clang versions; keep this behaviour.
// FIXME: even thread local variables?
if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
return true;
// Only handle COFF and ELF for now.
if (!TT.isOSBinFormatELF())
return false;
// If this is not an executable, don't assume anything is local.
const auto &CGOpts = CGM.getCodeGenOpts();
llvm::Reloc::Model RM = CGOpts.RelocationModel;
const auto &LOpts = CGM.getLangOpts();
if (RM != llvm::Reloc::Static && !LOpts.PIE) {
// On ELF, if -fno-semantic-interposition is specified and the target
// supports local aliases, there will be neither CC1
// -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
// dso_local if using a local alias is preferable (can avoid GOT
// indirection).
if (!GV->canBenefitFromLocalAlias())
return false;
return !(CGM.getLangOpts().SemanticInterposition ||
CGM.getLangOpts().HalfNoSemanticInterposition);
}
// A definition cannot be preempted from an executable.
if (!GV->isDeclarationForLinker())
return true;
// Most PIC code sequences that assume that a symbol is local cannot produce a
// 0 if it turns out the symbol is undefined. While this is ABI- and
// relocation-dependent, it seems worth handling here.
if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
return false;
// PowerPC64 prefers TOC indirection to avoid copy relocations.
if (TT.isPPC64())
return false;
if (CGOpts.DirectAccessExternalData) {
// If -fdirect-access-external-data (default for -fno-pic), set dso_local
// for non-thread-local variables. If the symbol is not defined in the
// executable, a copy relocation will be needed at link time. dso_local is
// excluded for thread-local variables because they generally don't support
// copy relocations.
if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
if (!Var->isThreadLocal())
return true;
// -fno-pic sets dso_local on a function declaration to allow direct
// accesses when taking its address (similar to a data symbol). If the
// function is not defined in the executable, a canonical PLT entry will be
// needed at link time. -fno-direct-access-external-data can avoid the
// canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
// it could just cause trouble without providing perceptible benefits.
if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
return true;
}
// If we can use copy relocations we can assume it is local.
// Otherwise don't assume it is local.
return false;
}
void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
}
void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
GlobalDecl GD) const {
const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
// C++ destructors have a few C++ ABI specific special cases.
if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
return;
}
setDLLImportDLLExport(GV, D);
}
void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
const NamedDecl *D) const {
if (D && D->isExternallyVisible()) {
if (D->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
}
}
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
GlobalDecl GD) const {
setDLLImportDLLExport(GV, GD);
setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
}
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
const NamedDecl *D) const {
setDLLImportDLLExport(GV, D);
setGVPropertiesAux(GV, D);
}
void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
const NamedDecl *D) const {
setGlobalVisibility(GV, D);
setDSOLocal(GV);
GV->setPartition(CodeGenOpts.SymbolPartition);
}
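// Maps the string argument of __attribute__((tls_model("..."))) to the
// corresponding LLVM thread-local mode; for example, "initial-exec" becomes
// llvm::GlobalVariable::InitialExecTLSModel.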
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
.Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
.Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
.Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
.Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
llvm::GlobalVariable::ThreadLocalMode
CodeGenModule::GetDefaultLLVMTLSModel() const {
switch (CodeGenOpts.getDefaultTLSModel()) {
case CodeGenOptions::GeneralDynamicTLSModel:
return llvm::GlobalVariable::GeneralDynamicTLSModel;
case CodeGenOptions::LocalDynamicTLSModel:
return llvm::GlobalVariable::LocalDynamicTLSModel;
case CodeGenOptions::InitialExecTLSModel:
return llvm::GlobalVariable::InitialExecTLSModel;
case CodeGenOptions::LocalExecTLSModel:
return llvm::GlobalVariable::LocalExecTLSModel;
}
llvm_unreachable("Invalid TLS model!");
}
void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalValue::ThreadLocalMode TLM;
TLM = GetDefaultLLVMTLSModel();
// Override the TLS model if it is explicitly specified.
if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
TLM = GetLLVMTLSModel(Attr->getModel());
}
GV->setThreadLocalMode(TLM);
}
static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
StringRef Name) {
const TargetInfo &Target = CGM.getTarget();
return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
}
static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
const CPUSpecificAttr *Attr,
unsigned CPUIndex,
raw_ostream &Out) {
// cpu_specific gets the current name, dispatch gets the resolver if IFunc is
// supported.
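// For example (illustrative), a cpu_specific version of a C function foo()
// is emitted as foo.<c>, where <c> is the one-character CPU mangling from
// getCPUSpecificMangling, and the cpu_dispatch body becomes foo.resolver
// when the target supports ifuncs.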
if (Attr)
Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
else if (CGM.getTarget().supportsIFunc())
Out << ".resolver";
}
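// Appends the target-version suffix used for target-based multiversioning.
// For example (illustrative), __attribute__((target("arch=atom,avx2"))) on
// foo() appends ".arch_atom_avx2", with features ordered by
// multiVersionSortPriority.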
static void AppendTargetMangling(const CodeGenModule &CGM,
const TargetAttr *Attr, raw_ostream &Out) {
if (Attr->isDefaultVersion())
return;
Out << '.';
const TargetInfo &Target = CGM.getTarget();
ParsedTargetAttr Info =
Attr->parse([&Target](StringRef LHS, StringRef RHS) {
// Multiversioning doesn't allow "no-${feature}", so we can
// only have "+" prefixes here.
assert(LHS.startswith("+") && RHS.startswith("+") &&
"Features should always have a prefix.");
return Target.multiVersionSortPriority(LHS.substr(1)) >
Target.multiVersionSortPriority(RHS.substr(1));
});
bool IsFirst = true;
if (!Info.Architecture.empty()) {
IsFirst = false;
Out << "arch_" << Info.Architecture;
}
for (StringRef Feat : Info.Features) {
if (!IsFirst)
Out << '_';
IsFirst = false;
Out << Feat.substr(1);
}
}
// Returns true if GD is a function decl with internal linkage and
// needs a unique suffix after the mangled name.
static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
CodeGenModule &CGM) {
const Decl *D = GD.getDecl();
return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
(CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
}
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
MangleContext &MC = CGM.getCXXABI().getMangleContext();
if (!CGM.getModuleNameHash().empty())
MC.needsUniqueInternalLinkageNames();
bool ShouldMangle = MC.shouldMangleDeclName(ND);
if (ShouldMangle)
MC.mangleName(GD.getWithDecl(ND), Out);
else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
const auto *FD = dyn_cast<FunctionDecl>(ND);
if (FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
Out << "__regcall3__" << II->getName();
} else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
Out << "__device_stub__" << II->getName();
} else {
Out << II->getName();
}
}
// Check if the module name hash should be appended for internal linkage
// symbols. This should come before multi-version target suffixes are
// appended. This is to keep the name and module hash suffix of the
// internal linkage function together. The unique suffix should only be
// added when name mangling is done to make sure that the final name can
// be properly demangled. For example, for C functions without prototypes,
// name mangling is not done and the unique suffix should not be appended.
if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
"Hash computed when not explicitly requested");
Out << CGM.getModuleNameHash();
}
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
switch (FD->getMultiVersionKind()) {
case MultiVersionKind::CPUDispatch:
case MultiVersionKind::CPUSpecific:
AppendCPUSpecificCPUDispatchMangling(CGM,
FD->getAttr<CPUSpecificAttr>(),
GD.getMultiVersionIndex(), Out);
break;
case MultiVersionKind::Target:
AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
break;
case MultiVersionKind::None:
llvm_unreachable("None multiversion type isn't valid here");
}
}
// Make a unique name for device-side static file-scope variables for HIP.
if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
CGM.getLangOpts().GPURelocatableDeviceCode &&
CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
CGM.printPostfixForExternalizedStaticVar(Out);
return std::string(Out.str());
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
const FunctionDecl *FD) {
if (!FD->isMultiVersion())
return;
// Get the name of what this would be without the 'target' attribute. This
// allows us to look up the version that was emitted when this wasn't a
// multiversion function.
std::string NonTargetName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
GlobalDecl OtherGD;
if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
assert(OtherGD.getCanonicalDecl()
.getDecl()
->getAsFunction()
->isMultiVersion() &&
"Other GD should now be a multiversioned function");
// OtherFD is the version of this function that was mangled BEFORE
// becoming a MultiVersion function. It potentially needs to be updated.
const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
.getDecl()
->getAsFunction()
->getMostRecentDecl();
std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
// This is so that if the initial version was already the 'default'
// version, we don't try to update it.
if (OtherName != NonTargetName) {
// Remove instead of erase, since others may have stored the StringRef
// to this.
const auto ExistingRecord = Manglings.find(NonTargetName);
if (ExistingRecord != std::end(Manglings))
Manglings.remove(&(*ExistingRecord));
auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
Entry->setName(OtherName);
}
}
}
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
GlobalDecl CanonicalGD = GD.getCanonicalDecl();
// Some ABIs don't have constructor variants. Make sure that base and
// complete constructors get mangled the same.
if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
if (!getTarget().getCXXABI().hasConstructorVariants()) {
CXXCtorType OrigCtorType = GD.getCtorType();
assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
if (OrigCtorType == Ctor_Base)
CanonicalGD = GlobalDecl(CD, Ctor_Complete);
}
}
// In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
// static device variable depends on whether the variable is referenced by
// a host or device function. Therefore the mangled name cannot be
// cached.
if (!LangOpts.CUDAIsDevice ||
!getContext().mayExternalizeStaticVar(GD.getDecl())) {
auto FoundName = MangledDeclNames.find(CanonicalGD);
if (FoundName != MangledDeclNames.end())
return FoundName->second;
}
// Keep the first result in the case of a mangling collision.
const auto *ND = cast<NamedDecl>(GD.getDecl());
std::string MangledName = getMangledNameImpl(*this, GD, ND);
// Ensure either we have different ABIs between host and device compilations
// (say, the host compilation follows the MSVC ABI while the device
// compilation follows the Itanium C++ ABI) or, if they follow the same ABI,
// kernel names after mangling are the same after name stubbing. The latter
// check is very important, as the device kernel name mangled in the host
// compilation is used to resolve which device binaries to execute;
// inconsistent naming results in undefined behavior. Even though we cannot
// compare that naming directly between host and device compilations, the
// host- and device-mangling in the host compilation can help catch certain
// mismatches.
assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
getLangOpts().CUDAIsDevice ||
(getContext().getAuxTargetInfo() &&
(getContext().getAuxTargetInfo()->getCXXABI() !=
getContext().getTargetInfo().getCXXABI())) ||
getCUDARuntime().getDeviceSideName(ND) ==
getMangledNameImpl(
*this,
GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
ND));
auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
}
StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
const BlockDecl *BD) {
MangleContext &MangleCtx = getCXXABI().getMangleContext();
const Decl *D = GD.getDecl();
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
if (!D)
MangleCtx.mangleGlobalBlock(BD,
dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
else
MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
return Result.first->first();
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
return getModule().getNamedValue(Name);
}
/// AddGlobalCtor - Add a function to the list that will be called before
/// main() runs.
void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
llvm::Constant *AssociatedData) {
// FIXME: Type coercion of void()* types.
GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
}
/// AddGlobalDtor - Add a function to the list that will be called
/// when the module is unloaded.
void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
bool IsDtorAttrFunc) {
if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
(!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
DtorsUsingAtExit[Priority].push_back(Dtor);
return;
}
// FIXME: Type coercion of void()* types.
GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
}
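// Emits the accumulated list as an appending global such as llvm.global_ctors.
// For example (illustrative IR), a single constructor entry produces:
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//     [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]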
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
if (Fns.empty()) return;
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
TheModule.getDataLayout().getProgramAddressSpace());
// Get the type of a ctor entry, { i32, void ()*, i8* }.
llvm::StructType *CtorStructTy = llvm::StructType::get(
Int32Ty, CtorPFTy, VoidPtrTy);
// Construct the constructor and destructor arrays.
ConstantInitBuilder builder(*this);
auto ctors = builder.beginArray(CtorStructTy);
for (const auto &I : Fns) {
auto ctor = ctors.beginStruct(CtorStructTy);
ctor.addInt(Int32Ty, I.Priority);
ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
if (I.AssociatedData)
ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
else
ctor.addNullPointer(VoidPtrTy);
ctor.finishAndAddTo(ctors);
}
auto list =
ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
/*constant*/ false,
llvm::GlobalValue::AppendingLinkage);
// The LTO linker doesn't seem to like it when we set an alignment
// on appending variables. Take it off as a workaround.
list->setAlignment(llvm::None);
Fns.clear();
}
llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
const auto *D = cast<FunctionDecl>(GD.getDecl());
GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
if (isa<CXXConstructorDecl>(D) &&
cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// Our approach to inheriting constructors is fundamentally different from
// that used by the MS ABI, so keep our inheriting constructor thunks
// internal rather than trying to pick an unambiguous mangling for them.
return llvm::GlobalValue::InternalLinkage;
}
return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
}
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
if (!MDS) return nullptr;
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
const CGFunctionInfo &Info,
llvm::Function *F) {
unsigned CallingConv;
llvm::AttributeList PAL;
ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
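// Strips the OpenCL image access qualifier from a printed type name; for
// example, "__read_only image2d_t" becomes "image2d_t".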
static void removeImageAccessQualifier(std::string& TyName) {
std::string ReadOnlyQual("__read_only");
std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
if (ReadOnlyPos != std::string::npos)
// "+ 1" for the space after access qualifier.
TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
else {
std::string WriteOnlyQual("__write_only");
std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
if (WriteOnlyPos != std::string::npos)
TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
else {
std::string ReadWriteQual("__read_write");
std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
if (ReadWritePos != std::string::npos)
TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
}
}
}
// Returns the address space id that should be emitted into the
// kernel_arg_addr_space metadata. This is always fixed to the ids specified
// in the SPIR 2.0 specification so that, for example, a clGetKernelArgInfo()
// implementation can distinguish the address spaces even on targets that do
// not map the OpenCL address spaces uniquely (basically all single-AS CPUs).
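// For example, a "global int *" kernel argument is reported as address space
// 1 and a "constant int *" argument as 2, regardless of how the target maps
// OpenCL address spaces onto LLVM address spaces.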
static unsigned ArgInfoAddressSpace(LangAS AS) {
switch (AS) {
case LangAS::opencl_global:
return 1;
case LangAS::opencl_constant:
return 2;
case LangAS::opencl_local:
return 3;
case LangAS::opencl_generic:
return 4; // Not in SPIR 2.0 specs.
case LangAS::opencl_global_device:
return 5;
case LangAS::opencl_global_host:
return 6;
default:
return 0; // Assume private.
}
}
void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
const FunctionDecl *FD,
CodeGenFunction *CGF) {
assert(((FD && CGF) || (!FD && !CGF)) &&
"Incorrect use - FD and CGF should either be both null or not!");
// Create MDNodes that represent the kernel arg metadata.
// Each MDNode is a list of the form "key" followed by N values, where N is
// the number of kernel arguments.
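// For example (illustrative), "kernel void k(global int *p)" yields
//   !kernel_arg_addr_space !{i32 1}
//   !kernel_arg_access_qual !{!"none"}
//   !kernel_arg_type !{!"int*"}
//   !kernel_arg_base_type !{!"int*"}
//   !kernel_arg_type_qual !{!""}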
const PrintingPolicy &Policy = Context.getPrintingPolicy();
// MDNode for the kernel argument address space qualifiers.
SmallVector<llvm::Metadata *, 8> addressQuals;
// MDNode for the kernel argument access qualifiers (images only).
SmallVector<llvm::Metadata *, 8> accessQuals;
// MDNode for the kernel argument type names.
SmallVector<llvm::Metadata *, 8> argTypeNames;
// MDNode for the kernel argument base type names.
SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
// MDNode for the kernel argument type qualifiers.
SmallVector<llvm::Metadata *, 8> argTypeQuals;
// MDNode for the kernel argument names.
SmallVector<llvm::Metadata *, 8> argNames;
if (FD && CGF)
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
const ParmVarDecl *parm = FD->getParamDecl(i);
QualType ty = parm->getType();
std::string typeQuals;
// Get image and pipe access qualifier:
if (ty->isImageType() || ty->isPipeType()) {
const Decl *PDecl = parm;
if (auto *TD = dyn_cast<TypedefType>(ty))
PDecl = TD->getDecl();
const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
if (A && A->isWriteOnly())
accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
else if (A && A->isReadWrite())
accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
else
accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
} else
accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
// Get argument name.
argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
auto getTypeSpelling = [&](QualType Ty) {
auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
if (Ty.isCanonical()) {
StringRef typeNameRef = typeName;
// Turn "unsigned type" to "utype"
if (typeNameRef.consume_front("unsigned "))
return std::string("u") + typeNameRef.str();
if (typeNameRef.consume_front("signed "))
return typeNameRef.str();
}
return typeName;
};
if (ty->isPointerType()) {
QualType pointeeTy = ty->getPointeeType();
// Get address qualifier.
addressQuals.push_back(
llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
// Get argument type name.
std::string typeName = getTypeSpelling(pointeeTy) + "*";
std::string baseTypeName =
getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
argBaseTypeNames.push_back(
llvm::MDString::get(VMContext, baseTypeName));
// Get argument type qualifiers:
if (ty.isRestrictQualified())
typeQuals = "restrict";
if (pointeeTy.isConstQualified() ||
(pointeeTy.getAddressSpace() == LangAS::opencl_constant))
typeQuals += typeQuals.empty() ? "const" : " const";
if (pointeeTy.isVolatileQualified())
typeQuals += typeQuals.empty() ? "volatile" : " volatile";
} else {
uint32_t AddrSpc = 0;
bool isPipe = ty->isPipeType();
if (ty->isImageType() || isPipe)
AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
addressQuals.push_back(
llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
// Get argument type name.
ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
std::string typeName = getTypeSpelling(ty);
std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());
// Remove access qualifiers on images
// (as they are inseparable from type in clang implementation,
// but OpenCL spec provides a special query to get access qualifier
// via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
if (ty->isImageType()) {
removeImageAccessQualifier(typeName);
removeImageAccessQualifier(baseTypeName);
}
argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
argBaseTypeNames.push_back(
llvm::MDString::get(VMContext, baseTypeName));
if (isPipe)
typeQuals = "pipe";
}
argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
}
Fn->setMetadata("kernel_arg_addr_space",
llvm::MDNode::get(VMContext, addressQuals));
Fn->setMetadata("kernel_arg_access_qual",
llvm::MDNode::get(VMContext, accessQuals));
Fn->setMetadata("kernel_arg_type",
llvm::MDNode::get(VMContext, argTypeNames));
Fn->setMetadata("kernel_arg_base_type",
llvm::MDNode::get(VMContext, argBaseTypeNames));
Fn->setMetadata("kernel_arg_type_qual",
llvm::MDNode::get(VMContext, argTypeQuals));
if (getCodeGenOpts().EmitOpenCLArgMetadata)
Fn->setMetadata("kernel_arg_name",
llvm::MDNode::get(VMContext, argNames));
}
/// Determines whether the language options require us to model
/// unwind exceptions. We treat -fexceptions as mandating this
/// except under the fragile ObjC ABI with only ObjC exceptions
/// enabled. This means, for example, that C with -fexceptions
/// enables this.
static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If exceptions are completely disabled, obviously this is false.
if (!LangOpts.Exceptions) return false;
// If C++ exceptions are enabled, this is true.
if (LangOpts.CXXExceptions) return true;
// If ObjC exceptions are enabled, this depends on the ABI.
if (LangOpts.ObjCExceptions) {
return LangOpts.ObjCRuntime.hasUnwindExceptions();
}
return true;
}
static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
const CXXMethodDecl *MD) {
// Check that the type metadata can ever actually be used by a call.
if (!CGM.getCodeGenOpts().LTOUnit ||
!CGM.HasHiddenLTOVisibility(MD->getParent()))
return false;
// Only functions whose address can be taken with a member function pointer
// need this sort of type metadata.
return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
!isa<CXXDestructorDecl>(MD);
}
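/// Return the "most base" classes of \p RD: the classes in its inheritance
/// graph that have no bases themselves.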
std::vector<const CXXRecordDecl *>
CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
llvm::SetVector<const CXXRecordDecl *> MostBases;
std::function<void (const CXXRecordDecl *)> CollectMostBases;
CollectMostBases = [&](const CXXRecordDecl *RD) {
if (RD->getNumBases() == 0)
MostBases.insert(RD);
for (const CXXBaseSpecifier &B : RD->bases())
CollectMostBases(B.getType()->getAsCXXRecordDecl());
};
CollectMostBases(RD);
return MostBases.takeVector();
}
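/// Set the LLVM attributes on \p F that depend on the definition of \p D:
/// unwind tables, stack protection, inlining-related attributes
/// (alwaysinline, noinline, inlinehint, optnone), optimization hints such as
/// cold/hot/minsize, function alignment, and CFI type metadata.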
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
llvm::AttrBuilder B;
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
if (CodeGenOpts.StackClashProtector)
B.addAttribute("probe-stack", "inline-asm");
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
B.addAttribute(llvm::Attribute::StackProtect);
else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
B.addAttribute(llvm::Attribute::StackProtectStrong);
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
B.addAttribute(llvm::Attribute::StackProtectReq);
}
if (!D) {
// If we don't have a declaration to control inlining, the function isn't
// explicitly marked as alwaysinline for semantic reasons, and inlining is
// disabled, mark the function as noinline.
if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
B.addAttribute(llvm::Attribute::NoInline);
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
return;
}
// Track whether we need to add the optnone LLVM attribute,
// starting with the default for this optimization level.
bool ShouldAddOptNone =
!CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
// We can't add optnone in the following cases; it won't pass the verifier.
ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();
// Add optnone, but do so only if the function isn't always_inline.
if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
!F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
B.addAttribute(llvm::Attribute::OptimizeNone);
// OptimizeNone implies noinline; we should not be inlining such functions.
B.addAttribute(llvm::Attribute::NoInline);
// We still need to handle naked functions even though optnone subsumes
// much of their semantics.
if (D->hasAttr<NakedAttr>())
B.addAttribute(llvm::Attribute::Naked);
// OptimizeNone wins over OptimizeForSize and MinSize.
F->removeFnAttr(llvm::Attribute::OptimizeForSize);
F->removeFnAttr(llvm::Attribute::MinSize);
} else if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
B.addAttribute(llvm::Attribute::Naked);
B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<NoDuplicateAttr>()) {
B.addAttribute(llvm::Attribute::NoDuplicate);
} else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
// Add noinline if the function isn't always_inline.
B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<AlwaysInlineAttr>() &&
!F->hasFnAttribute(llvm::Attribute::NoInline)) {
// (noinline wins over always_inline, and we can't specify both in IR)
B.addAttribute(llvm::Attribute::AlwaysInline);
} else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
// If we're not inlining, then force everything that isn't always_inline to
// carry an explicit noinline attribute.
if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
B.addAttribute(llvm::Attribute::NoInline);
} else {
// Otherwise, propagate the inline hint attribute and potentially use its
// absence to mark things as noinline.
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
// Search function and template pattern redeclarations for inline.
auto CheckForInline = [](const FunctionDecl *FD) {
auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
return Redecl->isInlineSpecified();
};
if (any_of(FD->redecls(), CheckRedeclForInline))
return true;
const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
if (!Pattern)
return false;
return any_of(Pattern->redecls(), CheckRedeclForInline);
};
if (CheckForInline(FD)) {
B.addAttribute(llvm::Attribute::InlineHint);
} else if (CodeGenOpts.getInlining() ==
CodeGenOptions::OnlyHintInlining &&
!FD->isInlined() &&
!F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
B.addAttribute(llvm::Attribute::NoInline);
}
}
}
// Add other optimization related attributes if we are optimizing this
// function.
if (!D->hasAttr<OptimizeNoneAttr>()) {
if (D->hasAttr<ColdAttr>()) {
if (!ShouldAddOptNone)
B.addAttribute(llvm::Attribute::OptimizeForSize);
B.addAttribute(llvm::Attribute::Cold);
}
if (D->hasAttr<HotAttr>())
B.addAttribute(llvm::Attribute::Hot);
if (D->hasAttr<MinSizeAttr>())
B.addAttribute(llvm::Attribute::MinSize);
}
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
F->setAlignment(llvm::Align(alignment));
if (!D->hasAttr<AlignedAttr>())
if (LangOpts.FunctionAlignment)
F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));
// Some C++ ABIs require 2-byte alignment for member functions, in order to
// reserve a bit for differentiating between virtual and non-virtual member
// functions. If the current target's C++ ABI requires this and this is a
// member function, set its alignment accordingly.
if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
F->setAlignment(llvm::Align(2));
}
// In the cross-dso CFI mode with canonical jump tables, we want !type
// attributes on definitions only.
if (CodeGenOpts.SanitizeCfiCrossDso &&
CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
// Skip available_externally functions. They won't be codegen'ed in the
// current module anyway.
if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
CreateFunctionTypeMetadataForIcall(FD, F);
}
}
// Emit type metadata on member functions for member function pointer checks.
// These are only ever necessary on definitions; we're guaranteed that the
// definition will be present in the LTO unit as a result of LTO visibility.
auto *MD = dyn_cast<CXXMethodDecl>(D);
if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
llvm::Metadata *Id =
CreateMetadataIdentifierForType(Context.getMemberPointerType(
MD->getType(), Context.getRecordType(Base).getTypePtr()));
F->addTypeMetadata(0, Id);
}
}
}
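/// Add the "strictfp" function attribute to \p F when the declaration
/// carries the StrictFP attribute.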
void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
llvm::Function *F) {
if (D->hasAttr<StrictFPAttr>()) {
llvm::AttrBuilder FuncAttrs;
FuncAttrs.addAttribute("strictfp");
F->addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
}
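/// Set attributes common to all emitted globals: visibility and DSO-local
/// properties, plus membership in llvm.used/llvm.compiler.used for globals
/// carrying the 'used' attribute or for static const variables when
/// CodeGenOpts.KeepStaticConsts is enabled.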
void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
const Decl *D = GD.getDecl();
if (dyn_cast_or_null<NamedDecl>(D))
setGVProperties(GV, GD);
else
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
if (D && D->hasAttr<UsedAttr>())
addUsedOrCompilerUsedGlobal(GV);
if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
const auto *VD = cast<VarDecl>(D);
if (VD->getType().isConstQualified() &&
VD->getStorageDuration() == SD_Static)
addUsedOrCompilerUsedGlobal(GV);
}
}
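/// Compute the "target-cpu", "tune-cpu", and "target-features" attributes
/// for \p GD and add them to \p Attrs. Returns true if any attribute was
/// added.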
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &Attrs) {
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute, then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
std::vector<std::string> Features;
const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
FD = FD ? FD->getMostRecentDecl() : FD;
const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
bool AddedAttr = false;
if (TD || SD) {
llvm::StringMap<bool> FeatureMap;
getContext().getFunctionFeatureMap(FeatureMap, GD);
// Produce the canonical string for this set of features.
for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
// Now add the target-cpu and target-features to the function.
// While we populated the feature map above, we still need to
// get and parse the target attribute so we can get the cpu for
// the function.
if (TD) {
ParsedTargetAttr ParsedAttr = TD->parse();
if (!ParsedAttr.Architecture.empty() &&
getTarget().isValidCPUName(ParsedAttr.Architecture)) {
TargetCPU = ParsedAttr.Architecture;
TuneCPU = ""; // Clear the tune CPU.
}
if (!ParsedAttr.Tune.empty() &&
getTarget().isValidCPUName(ParsedAttr.Tune))
TuneCPU = ParsedAttr.Tune;
}
} else {
// Otherwise just add the existing target cpu and target features to the
// function.
Features = getTarget().getTargetOpts().Features;
}
if (!TargetCPU.empty()) {
Attrs.addAttribute("target-cpu", TargetCPU);
AddedAttr = true;
}
if (!TuneCPU.empty()) {
Attrs.addAttribute("tune-cpu", TuneCPU);
AddedAttr = true;
}
if (!Features.empty()) {
llvm::sort(Features);
Attrs.addAttribute("target-features", llvm::join(Features, ","));
AddedAttr = true;
}
return AddedAttr;
}
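/// Set attributes that only apply to non-alias global objects: the common
/// attributes, section-related attributes from pragmas and declarations, and
/// per-function CPU and feature attributes.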
void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
llvm::GlobalObject *GO) {
const Decl *D = GD.getDecl();
SetCommonAttributes(GD, GO);
if (D) {
if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
if (D->hasAttr<RetainAttr>())
addUsedGlobal(GV);
if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
GV->addAttribute("bss-section", SA->getName());
if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
GV->addAttribute("data-section", SA->getName());
if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
GV->addAttribute("rodata-section", SA->getName());
if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
GV->addAttribute("relro-section", SA->getName());
}
if (auto *F = dyn_cast<llvm::Function>(GO)) {
if (D->hasAttr<RetainAttr>())
addUsedGlobal(F);
if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
if (!D->getAttr<SectionAttr>())
F->addFnAttr("implicit-section-name", SA->getName());
llvm::AttrBuilder Attrs;
if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
llvm::AttrBuilder RemoveAttrs;
RemoveAttrs.addAttribute("target-cpu");
RemoveAttrs.addAttribute("target-features");
RemoveAttrs.addAttribute("tune-cpu");
F->removeAttributes(llvm::AttributeList::FunctionIndex, RemoveAttrs);
F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
}
}
if (const auto *CSA = D->getAttr<CodeSegAttr>())
GO->setSection(CSA->getName());
else if (const auto *SA = D->getAttr<SectionAttr>())
GO->setSection(SA->getName());
}
getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
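/// Set up \p F as a function emitted with internal linkage: apply the usual
/// declaration and definition attributes and then mark it internal.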
void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
const CGFunctionInfo &FI) {
const Decl *D = GD.getDecl();
SetLLVMFunctionAttributes(GD, FI, F);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
setNonAliasAttributes(GD, F);
}
static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
// Set linkage and visibility in case we never see a definition.
LinkageInfo LV = ND->getLinkageAndVisibility();
// Don't set internal linkage on declarations.
// "extern_weak" is overloaded in LLVM; we probably should have
// separate linkage types for this.
if (isExternallyVisible(LV.getLinkage()) &&
(ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
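/// Attach the !type metadata used by the cfi-icall sanitizer to validate
/// indirect calls to \p F. Non-static class methods are skipped here because
/// they are checked through the vtable or member function pointer paths.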
void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F) {
// Only if we are checking indirect calls.
if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
return;
// Non-static class methods are handled via vtable or member function pointer
// checks elsewhere.
if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
return;
llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
F->addTypeMetadata(0, MD);
F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
// Emit a hash-based bit set entry for cross-DSO calls.
if (CodeGenOpts.SanitizeCfiCrossDso)
if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
}
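/// Set the attributes, linkage, visibility, section, and metadata that apply
/// to a function declaration \p F; a later definition may override some of
/// them.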
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
bool IsIncompleteFunction,
bool IsThunk) {
if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
// to the intrinsic's attributes.
F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
return;
}
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
// GCC and does not actually return "this".
if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
!(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
assert(!F->arg_empty() &&
F->arg_begin()->getType()
->canLosslesslyBitCastTo(F->getReturnType()) &&
"unexpected this return");
F->addAttribute(1, llvm::Attribute::Returned);
}
// Only a few attributes are set on declarations; these may later be
// overridden by a definition.
setLinkageForGV(F, FD);
setGVProperties(F, FD);
// Setup target-specific attributes.
if (!IsIncompleteFunction && F->isDeclaration())
getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
if (const auto *CSA = FD->getAttr<CodeSegAttr>())
F->setSection(CSA->getName());
else if (const auto *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
// If we plan on emitting this inline builtin, we can't treat it as a builtin.
if (FD->isInlineBuiltinDeclaration()) {
const FunctionDecl *FDBody;
bool HasBody = FD->hasBody(FDBody);
(void)HasBody;
assert(HasBody && "Inline builtin declarations should always have an "
"available body!");
if (shouldEmitFunction(FDBody))
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
}
if (FD->isReplaceableGlobalAllocationFunction()) {
// A replaceable global allocation function does not act like a builtin by
// default, only if it is invoked by a new-expression or delete-expression.
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
}
if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isVirtual())
F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Don't emit entries for function declarations in the cross-DSO mode. This
// is handled with better precision by the receiving DSO. But if jump tables
// are non-canonical then we need type metadata in order to produce the local
// jump table.
if (!CodeGenOpts.SanitizeCfiCrossDso ||
!CodeGenOpts.SanitizeCfiCanonicalJumpTables)
CreateFunctionTypeMetadataForIcall(FD, F);
if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
if (const auto *CB = FD->getAttr<CallbackAttr>()) {
// Annotate the callback behavior as metadata:
// - The callback callee (as argument number).
// - The callback payloads (as argument numbers).
llvm::LLVMContext &Ctx = F->getContext();
llvm::MDBuilder MDB(Ctx);
// The payload indices are all but the first one in the encoding. The first
// identifies the callback callee.
int CalleeIdx = *CB->encoding_begin();
ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
F->addMetadata(llvm::LLVMContext::MD_callback,
*llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
CalleeIdx, PayloadIndices,
/* VarArgsArePassed */ false)}));
}
}
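/// Add a global value to the list emitted as llvm.used, which keeps it from
/// being discarded even if it is otherwise unreferenced.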
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
"Only globals with definition can force usage.");
LLVMUsed.emplace_back(GV);
}
void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
assert(!GV->isDeclaration() &&
"Only globals with definition can force usage.");
LLVMCompilerUsed.emplace_back(GV);
}
void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
"Only globals with definition can force usage.");
if (getTriple().isOSBinFormatELF())
LLVMCompilerUsed.emplace_back(GV);
else
LLVMUsed.emplace_back(GV);
}
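/// Emit the globals in \p List as an appending array named \p Name (e.g.
/// "llvm.used" or "llvm.compiler.used") placed in the llvm.metadata section.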
static void emitUsed(CodeGenModule &CGM, StringRef Name,
std::vector<llvm::WeakTrackingVH> &List) {
// Don't create llvm.used if there is no need.
if (List.empty())
return;
// Convert List to what ConstantArray needs.
SmallVector<llvm::Constant*, 8> UsedArray;
UsedArray.resize(List.size());
for (unsigned i = 0, e = List.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
}
if (UsedArray.empty())
return;
llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(ATy, UsedArray), Name);
GV->setSection("llvm.metadata");
}
void CodeGenModule::emitLLVMUsed() {
emitUsed(*this, "llvm.used", LLVMUsed);
emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
}
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
llvm::SmallString<32> Opt;
getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
if (Opt.empty())
return;
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDependentLib(StringRef Lib) {
auto &C = getLLVMContext();
if (getTarget().getTriple().isOSBinFormatELF()) {
ELFDependentLibraries.push_back(
llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
return;
}
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
}
/// Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
SmallVectorImpl<llvm::MDNode *> &Metadata,
llvm::SmallPtrSet<Module *, 16> &Visited) {
// Import this module's parent.
if (Mod->Parent && Visited.insert(Mod->Parent).second) {
addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
}
// Import this module's dependencies.
for (unsigned I = Mod->Imports.size(); I > 0; --I) {
if (Visited.insert(Mod->Imports[I - 1]).second)
addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
}
// Add linker options to link against the libraries/frameworks
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
// For modules that use export_as for linking, use that module
// name instead.
if (Mod->UseExportAsModuleLinkName)
return;
for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
// Link against a framework. Frameworks are currently Darwin only, so we
// don't need to ask TargetCodeGenInfo for the spelling of the linker option.
if (Mod->LinkLibraries[I-1].IsFramework) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "-framework"),
llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
Metadata.push_back(llvm::MDNode::get(Context, Args));
continue;
}
// Link against a library.
if (IsELF) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "lib"),
llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
};
Metadata.push_back(llvm::MDNode::get(Context, Args));
} else {
llvm::SmallString<24> Opt;
CGM.getTargetCodeGenInfo().getDependentLibraryOption(
Mod->LinkLibraries[I - 1].Library, Opt);
auto *OptString = llvm::MDString::get(Context, Opt);
Metadata.push_back(llvm::MDNode::get(Context, OptString));
}
}
}
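/// Emit the llvm.linker.options metadata for imported modules: seed a
/// worklist with the imported modules, prune to their non-explicit leaf
/// submodules, then add each module's link libraries in reverse topological
/// order.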
void CodeGenModule::EmitModuleLinkOptions() {
// Collect the set of all of the modules we want to visit to emit link
// options, which is essentially the imported modules and all of their
// non-explicit child modules.
llvm::SetVector<clang::Module *> LinkModules;
llvm::SmallPtrSet<clang::Module *, 16> Visited;
SmallVector<clang::Module *, 16> Stack;
// Seed the stack with imported modules.
for (Module *M : ImportedModules) {
// Do not add any link flags when an implementation TU of a module imports
// a header of that same module.
if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
!getLangOpts().isCompilingModule())
continue;
if (Visited.insert(M).second)
Stack.push_back(M);
}
// Find all of the modules to import, making a little effort to prune
// non-leaf modules.
while (!Stack.empty()) {
clang::Module *Mod = Stack.pop_back_val();
bool AnyChildren = false;
// Visit the submodules of this module.
for (const auto &SM : Mod->submodules()) {
// Skip explicit children; they need to be explicitly imported to be
// linked against.
if (SM->IsExplicit)
continue;
if (Visited.insert(SM).second) {
Stack.push_back(SM);
AnyChildren = true;
}
}
// We didn't find any children, so add this module to the list of
// modules to link against.
if (!AnyChildren) {
LinkModules.insert(Mod);
}
}
// Add link options for all of the imported modules in reverse topological
// order. We don't do anything to try to order import link flags with respect
// to linker options inserted by things like #pragma comment().
SmallVector<llvm::MDNode *, 16> MetadataArgs;
Visited.clear();
for (Module *M : LinkModules)
if (Visited.insert(M).second)
addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
std::reverse(MetadataArgs.begin(), MetadataArgs.end());
LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
// Add the linker options metadata flag.
auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
for (auto *MD : LinkerOptionsMetadata)
NMD->addOperand(MD);
}
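/// Emit deferred decls and vtables that have become referenced, repeating
/// until no new deferred work is produced.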
void CodeGenModule::EmitDeferred() {
// Emit deferred declare target declarations.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
getOpenMPRuntime().emitDeferredTargetDecls();
// Emit code for any potentially referenced deferred decls. Since a
// previously unused static decl may become used during the generation of code
// for a static function, iterate until no changes are made.
if (!DeferredVTables.empty()) {
EmitDeferredVTables();
// Emitting a vtable doesn't directly cause more vtables to
// become deferred, although it can cause functions to be
// emitted that then need those vtables.
assert(DeferredVTables.empty());
}
// Emit CUDA/HIP static device variables referenced by host code only.
if (getLangOpts().CUDA)
for (auto V : getContext().CUDAStaticDeviceVarReferencedByHost)
DeferredDeclsToEmit.push_back(V);
// Stop if we're out of both deferred vtables and deferred declarations.
if (DeferredDeclsToEmit.empty())
return;
// Grab the list of decls to emit. If EmitGlobalDefinition schedules more
// work, it will not interfere with this.
std::vector<GlobalDecl> CurDeclsToEmit;
CurDeclsToEmit.swap(DeferredDeclsToEmit);
for (GlobalDecl &D : CurDeclsToEmit) {
// We should call GetAddrOfGlobal with IsForDefinition set to true in order
// to get a GlobalValue with exactly the type we need, not something that
// might have been created for another decl with the same mangled name but
// a different type.
llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
GetAddrOfGlobal(D, ForDefinition));
// In case of different address spaces, we may still get a cast even with
// IsForDefinition equal to true. Query the mangled names table to get the
// GlobalValue.
if (!GV)
GV = GetGlobalValue(getMangledName(D));
// Make sure GetGlobalValue returned non-null.
assert(GV);
// Check to see if we've already emitted this. This is necessary
// for a couple of reasons: first, decls can end up in the
// deferred-decls queue multiple times, and second, decls can end
// up with definitions in unusual ways (e.g. by an extern inline
// function acquiring a strong function redefinition). Just
// ignore these cases.
if (!GV->isDeclaration())
continue;
// If this is OpenMP, check if it is legal to emit this global normally.
if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
continue;
// Otherwise, emit the definition and move on to the next one.
EmitGlobalDefinition(D, GV);
// If we found out that we need to emit more decls, do that recursively.
// This has the advantage that the decls are emitted in a DFS and related
// ones are close together, which is convenient for testing.
if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
EmitDeferred();
assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
}
}
}
void CodeGenModule::EmitVTablesOpportunistically() {
// Try to emit external vtables as available_externally if all of their
// inlined virtual functions have been emitted. This runs after EmitDeferred()
// and therefore is not allowed to create new references to things that need
// to be emitted lazily. Note that it also relies on the fact that we emit
// RTTI eagerly.
assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
&& "Only emit opportunistic vtables with optimizations");
for (const CXXRecordDecl *RD : OpportunisticVTables) {
assert(getVTables().isVTableExternal(RD) &&