blob: 63525258456200b7ed2905f0b4ffa83d8c9e8d24 [file] [log] [blame]
//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
/// \file
/// Implements semantic analysis for C++ expressions.
#include "clang/Sema/Template.h"
#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/AlignedAllocation.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaLambda.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace sema;
/// Handle the result of the special case name lookup for inheriting
/// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as
/// constructor names in member using declarations, even if 'X' is not the
/// name of the corresponding type.
ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
                                              SourceLocation NameLoc,
                                              IdentifierInfo &Name) {
  NestedNameSpecifier *NNS = SS.getScopeRep();

  // Convert the nested-name-specifier into a type.
  QualType Type;
  switch (NNS->getKind()) {
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    // The prefix already names a type; use it as-is.
    Type = QualType(NNS->getAsType(), 0);
    break;

  case NestedNameSpecifier::Identifier:
    // Strip off the last layer of the nested-name-specifier and build a
    // typename type for it.
    assert(NNS->getAsIdentifier() == &Name && "not a constructor name");
    Type = Context.getDependentNameType(ETK_None, NNS->getPrefix(),
                                        NNS->getAsIdentifier());
    break;

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    llvm_unreachable("Nested name specifier is not a type for inheriting ctor");
  }

  // This reference to the type is located entirely at the location of the
  // final identifier in the qualified-id.
  return CreateParsedType(Type,
                          Context.getTrivialTypeSourceInfo(Type, NameLoc));
}
/// Resolve the use of a class name in constructor position ('C::C') to the
/// type of the class, or a null ParsedType on failure.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (several '{' below are never closed, the lookup loop lacks a terminating
/// 'break', and the diagnostic at the bottom lost its 'Diag(...)' head);
/// reconcile against revision history before editing the logic.
ParsedType Sema::getConstructorName(IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    Scope *S, CXXScopeSpec &SS,
                                    bool EnteringContext) {
  CXXRecordDecl *CurClass = getCurrentClass(S, &SS);
  assert(CurClass && &II == CurClass->getIdentifier() &&
         "not a constructor name");

  // When naming a constructor as a member of a dependent context (eg, in a
  // friend declaration or an inherited constructor declaration), form an
  // unresolved "typename" type.
  if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
    QualType T = Context.getDependentNameType(ETK_None, SS.getScopeRep(), &II);
    return ParsedType::make(T);

  // A qualified name requires a complete scope; bail out on failure (the
  // helper has already diagnosed).
  if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, CurClass))
    return ParsedType();

  // Find the injected-class-name declaration. Note that we make no attempt to
  // diagnose cases where the injected-class-name is shadowed: the only
  // declaration that can validly shadow the injected-class-name is a
  // non-static data member, and if the class contains both a non-static data
  // member and a constructor then it is ill-formed (we check that in
  // CheckCompletedCXXClass).
  CXXRecordDecl *InjectedClassName = nullptr;
  for (NamedDecl *ND : CurClass->lookup(&II)) {
    auto *RD = dyn_cast<CXXRecordDecl>(ND);
    if (RD && RD->isInjectedClassName()) {
      InjectedClassName = RD;

  if (!InjectedClassName) {
    if (!CurClass->isInvalidDecl()) {
      // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
      // properly. Work around it here for now.
      diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
    return ParsedType();

  QualType T = Context.getTypeDeclType(InjectedClassName);
  DiagnoseUseOfDecl(InjectedClassName, NameLoc);
  MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
  return ParsedType::make(T);
/// Resolve a destructor name ('~T', possibly qualified) to the type being
/// destroyed, trying several lookup scopes and falling back to a set of
/// compatibility extensions; returns null (after diagnosing) on failure.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (e.g. the truncated statement 'NamedDecl *D =;' below, unbalanced braces,
/// dangling 'if's with no bodies). The comments added here describe only what
/// the surviving code shows; reconcile against revision history before
/// editing the logic.
ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
                                   IdentifierInfo &II,
                                   SourceLocation NameLoc,
                                   Scope *S, CXXScopeSpec &SS,
                                   ParsedType ObjectTypePtr,
                                   bool EnteringContext) {
  // Determine where to perform name lookup.
  // FIXME: This area of the standard is very messy, and the current
  // wording is rather unclear about which scopes we search for the
  // destructor name; see core issues 399 and 555. Issue 399 in
  // particular shows where the current description of destructor name
  // lookup is completely out of line with existing practice, e.g.,
  // this appears to be ill-formed:
  //
  //   namespace N {
  //     template <typename T> struct S {
  //       ~S();
  //     };
  //   }
  //
  //   void f(N::S<int>* s) {
  //     s->N::S<int>::~S();
  //   }
  //
  // See also PR6358 and PR6359.
  //
  // For now, we accept all the cases in which the name given could plausibly
  // be interpreted as a correct destructor name, issuing off-by-default
  // extension diagnostics on the cases that don't strictly conform to the
  // C++20 rules. This basically means we always consider looking in the
  // nested-name-specifier prefix, the complete nested-name-specifier, and
  // the scope, and accept if we find the expected type in any of the three
  // places.

  if (SS.isInvalid())
    return nullptr;

  // Whether we've failed with a diagnostic already.
  bool Failed = false;

  // Accumulates every candidate seen across all lookups, for the final
  // "couldn't find it, but here's what we did find" diagnostics.
  llvm::SmallVector<NamedDecl*, 8> FoundDecls;
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;

  // If we have an object type, it's because we are in a
  // pseudo-destructor-expression or a member access expression, and
  // we know what type we're looking for.
  QualType SearchType =
      ObjectTypePtr ? GetTypeFromParser(ObjectTypePtr) : QualType();

  // Shared post-processing for every lookup attempt below: filter the result
  // set, handle ambiguity as an extension, and convert a single acceptable
  // TypeDecl into a ParsedType.
  auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
    // A result is acceptable if it is a type that matches the search type
    // (or if there is no / a dependent search type).
    auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
      auto *Type = dyn_cast<TypeDecl>(D->getUnderlyingDecl());
      if (!Type)
        return false;

      if (SearchType.isNull() || SearchType->isDependentType())
        return true;

      QualType T = Context.getTypeDeclType(Type);
      return Context.hasSameUnqualifiedType(T, SearchType);

    unsigned NumAcceptableResults = 0;
    for (NamedDecl *D : Found) {
      if (IsAcceptableResult(D))

      // Don't list a class twice in the lookup failure diagnostic if it's
      // found by both its injected-class-name and by the name in the enclosing
      // scope.
      if (auto *RD = dyn_cast<CXXRecordDecl>(D))
        if (RD->isInjectedClassName())
          D = cast<NamedDecl>(RD->getParent());

      if (FoundDeclSet.insert(D).second)

    // As an extension, attempt to "fix" an ambiguity by erasing all non-type
    // results, and all non-matching results if we have a search type. It's not
    // clear what the right behavior is if destructor lookup hits an ambiguity,
    // but other compilers do generally accept at least some kinds of
    // ambiguity.
    if (Found.isAmbiguous() && NumAcceptableResults == 1) {
      Diag(NameLoc, diag::ext_dtor_name_ambiguous);
      LookupResult::Filter F = Found.makeFilter();
      while (F.hasNext()) {
        NamedDecl *D =;
        if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
          Diag(D->getLocation(), diag::note_destructor_type_here)
              << Context.getTypeDeclType(TD);
          Diag(D->getLocation(), diag::note_destructor_nontype_here);

        if (!IsAcceptableResult(D))

    if (Found.isAmbiguous())
      Failed = true;

    if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
      if (IsAcceptableResult(Type)) {
        QualType T = Context.getTypeDeclType(Type);
        MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
        return CreateParsedType(T,
                                Context.getTrivialTypeSourceInfo(T, NameLoc));

    return nullptr;

  // Set whenever any of the scopes we consider is dependent, so we can form
  // a dependent typename type instead of failing.
  bool IsDependent = false;

  // Look for ~T in the type of the object expression (p->~T()).
  auto LookupInObjectType = [&]() -> ParsedType {
    if (Failed || SearchType.isNull())
      return nullptr;

    IsDependent |= SearchType->isDependentType();

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    DeclContext *LookupCtx = computeDeclContext(SearchType);
    if (!LookupCtx)
      return nullptr;
    LookupQualifiedName(Found, LookupCtx);
    return CheckLookupResult(Found);

  // Look for ~T inside a given nested-name-specifier.
  auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
    if (Failed)
      return nullptr;

    IsDependent |= isDependentScopeSpecifier(LookupSS);
    DeclContext *LookupCtx = computeDeclContext(LookupSS, EnteringContext);
    if (!LookupCtx)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    if (RequireCompleteDeclContext(LookupSS, LookupCtx)) {
      Failed = true;
      return nullptr;
    LookupQualifiedName(Found, LookupCtx);
    return CheckLookupResult(Found);

  // Look for ~T as an unqualified name in the lexical scope.
  auto LookupInScope = [&]() -> ParsedType {
    if (Failed || !S)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    LookupName(Found, S);
    return CheckLookupResult(Found);

  // C++2a [basic.lookup.qual]p6:
  //   In a qualified-id of the form
  //
  //     nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   the second type-name is looked up in the same scope as the first.
  //
  // We interpret this as meaning that if you do a dual-scope lookup for the
  // first name, you also do a dual-scope lookup for the second name, per
  // C++ [basic.lookup.classref]p4:
  //
  //   If the id-expression in a class member access is a qualified-id of the
  //   form
  //
  //     class-name-or-namespace-name :: ...
  //
  //   the class-name-or-namespace-name following the . or -> is first looked
  //   up in the class of the object expression and the name, if found, is used.
  //   Otherwise, it is looked up in the context of the entire
  //   postfix-expression.
  //
  // This looks in the same scopes as for an unqualified destructor name:
  //
  // C++ [basic.lookup.classref]p3:
  //   If the unqualified-id is ~ type-name, the type-name is looked up
  //   in the context of the entire postfix-expression. If the type T
  //   of the object expression is of a class type C, the type-name is
  //   also looked up in the scope of class C. At least one of the
  //   lookups shall find a name that refers to cv T.
  //
  // FIXME: The intent is unclear here. Should type-name::~type-name look in
  // the scope anyway if it finds a non-matching name declared in the class?
  // If both lookups succeed and find a dependent result, which result should
  // we retain? (Same question for p->~type-name().)

  if (NestedNameSpecifier *Prefix =
          SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
    // This is
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // Look for the second type-name in the nested-name-specifier.
    CXXScopeSpec PrefixSS;
    PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
    if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
      return T;
  } else {
    // This is one of
    //
    //   type-name :: ~ type-name
    //   ~ type-name
    //
    // Look in the scope and (if any) the object type.
    if (ParsedType T = LookupInScope())
      return T;
    if (ParsedType T = LookupInObjectType())
      return T;

  if (Failed)
    return nullptr;

  if (IsDependent) {
    // We didn't find our type, but that's OK: it's dependent anyway.

    // FIXME: What if we have no nested-name-specifier?
    QualType T = CheckTypenameType(ETK_None, SourceLocation(),
                                   II, NameLoc);
    return ParsedType::make(T);

  // The remaining cases are all non-standard extensions imitating the behavior
  // of various other compilers.
  unsigned NumNonExtensionDecls = FoundDecls.size();

  if (SS.isSet()) {
    // For compatibility with older broken C++ rules and existing code,
    //
    //   nested-name-specifier :: ~ type-name
    //
    // also looks for type-name within the nested-name-specifier.
    if (ParsedType T = LookupInNestedNameSpec(SS)) {
      Diag(SS.getEndLoc(), diag::ext_dtor_named_in_wrong_scope)
          << SS.getRange()
          << FixItHint::CreateInsertion(SS.getEndLoc(),
                                        ("::" + II.getName()).str());
      return T;

    // For compatibility with other compilers and older versions of Clang,
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // also looks for type-name in the scope. Unfortunately, we can't
    // reasonably apply this fallback for dependent nested-name-specifiers.
    if (SS.getScopeRep()->getPrefix()) {
      if (ParsedType T = LookupInScope()) {
        Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
            << FixItHint::CreateRemoval(SS.getRange());
        Diag(FoundDecls.back()->getLocation(), diag::note_destructor_type_here)
            << GetTypeFromParser(T);
        return T;

  // We didn't find anything matching; tell the user what we did find (if
  // anything).

  // Don't tell the user about declarations we shouldn't have found.

  // List types before non-types.
  std::stable_sort(FoundDecls.begin(), FoundDecls.end(),
                   [](NamedDecl *A, NamedDecl *B) {
                     return isa<TypeDecl>(A->getUnderlyingDecl()) >

  // Suggest a fixit to properly name the destroyed type.
  auto MakeFixItHint = [&]{
    const CXXRecordDecl *Destroyed = nullptr;
    // FIXME: If we have a scope specifier, suggest its last component?
    if (!SearchType.isNull())
      Destroyed = SearchType->getAsCXXRecordDecl();
    else if (S)
      Destroyed = dyn_cast_or_null<CXXRecordDecl>(S->getEntity());
    if (Destroyed)
      return FixItHint::CreateReplacement(SourceRange(NameLoc),
    return FixItHint();

  if (FoundDecls.empty()) {
    // FIXME: Attempt typo-correction?
    Diag(NameLoc, diag::err_undeclared_destructor_name)
        << &II << MakeFixItHint();
  } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
    if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
      assert(!SearchType.isNull() &&
             "should only reject a type result if we have a search type");
      QualType T = Context.getTypeDeclType(TD);
      Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
          << T << SearchType << MakeFixItHint();
    } else {
      Diag(NameLoc, diag::err_destructor_expr_nontype)
          << &II << MakeFixItHint();
  } else {
    Diag(NameLoc, SearchType.isNull() ? diag::err_destructor_name_nontype
                                      : diag::err_destructor_expr_mismatch)
        << &II << SearchType << MakeFixItHint();

  for (NamedDecl *FoundD : FoundDecls) {
    if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
      Diag(FoundD->getLocation(), diag::note_destructor_type_here)
          << Context.getTypeDeclType(TD);
      Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
          << FoundD;

  return nullptr;
/// Resolve the type named by a '~decltype(...)' destructor name.
///
/// \param DS the decl-spec holding the decltype (TST_error / TST_decltype_auto
///        are rejected; anything else asserts).
/// \param ObjectType if non-null, the known type of the object expression,
///        checked against the named type for better diagnostics.
/// \returns the destroyed type, or null after diagnosing.
ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
                                              ParsedType ObjectType) {
  if (DS.getTypeSpecType() == DeclSpec::TST_error)
    return nullptr;

  if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
    Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
    return nullptr;
  }

  assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
         "unexpected type in getDestructorType");
  QualType T = BuildDecltypeType(DS.getRepAsExpr());

  // If we know the type of the object, check that the correct destructor
  // type was named now; we can give better diagnostics this way.
  QualType SearchType = GetTypeFromParser(ObjectType);
  if (!SearchType.isNull() && !SearchType->isDependentType() &&
      !Context.hasSameUnqualifiedType(T, SearchType)) {
    Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
        << T << SearchType;
    return nullptr;
  }

  return ParsedType::make(T);
}
/// Check the scope and (for non-UD-suffix spellings) reservedness of a
/// literal-operator-id; returns true if the name is rejected.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (the FixItHint::CreateReplacement call below is missing its first
/// argument, and several braces are never closed); reconcile against
/// revision history before editing the logic.
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
                                  const UnqualifiedId &Name, bool IsUDSuffix) {
  assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
  if (!IsUDSuffix) {
    // [over.literal] p8
    //
    // double operator""_Bq(long double);  // OK: not a reserved identifier
    // double operator"" _Bq(long double); // ill-formed, no diagnostic required
    IdentifierInfo *II = Name.Identifier;
    ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts());
    SourceLocation Loc = Name.getEndLoc();
    // Warn (outside system headers) and suggest joining the suffix onto the
    // 'operator""' token so it is no longer a reserved identifier use.
    if (isReservedInAllContexts(Status) &&
        !PP.getSourceManager().isInSystemHeader(Loc)) {
      Diag(Loc, diag::warn_reserved_extern_symbol)
          << II << static_cast<int>(Status)
          << FixItHint::CreateReplacement(
                 (StringRef("operator\"\"") + II->getName()).str());

  if (!SS.isValid())
    return false;

  switch (SS.getScopeRep()->getKind()) {
  case NestedNameSpecifier::Identifier:
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    // Per C++11 [over.literal]p2, literal operators can only be declared at
    // namespace scope. Therefore, this unqualified-id cannot name anything.
    // Reject it early, because we have no AST representation for this in the
    // case where the scope is dependent.
    Diag(Name.getBeginLoc(), diag::err_literal_operator_id_outside_namespace)
        << SS.getScopeRep();
    return true;

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    return false;

  llvm_unreachable("unknown nested name specifier kind");
/// Build a C++ typeid expression with a type operand.
/// Build a C++ typeid expression with a type operand.
///
/// \param TypeInfoType the (already looked-up) std::type_info type.
/// \param Operand the type-id operand, with source info.
/// \returns the CXXTypeidExpr, or ExprError after diagnosing an incomplete
///          class, variably-modified type, or qualified function type.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                TypeSourceInfo *Operand,
                                SourceLocation RParenLoc) {
  // C++ [expr.typeid]p4:
  //   The top-level cv-qualifiers of the lvalue expression or the type-id
  //   that is the operand of typeid are always ignored.
  //   If the type of the type-id is a class type or a reference to a class
  //   type, the class shall be completely-defined.
  Qualifiers Quals;
  QualType T
    = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
                                      Quals);
  if (T->getAs<RecordType>() &&
      RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
    return ExprError();

  if (T->isVariablyModifiedType())
    return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T);

  if (CheckQualifiedFunctionForTypeId(T, TypeidLoc))
    return ExprError();

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
                                     SourceRange(TypeidLoc, RParenLoc));
}
/// Build a C++ typeid expression with an expression operand.
/// Build a C++ typeid expression with an expression operand.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (several '{' below are never closed, and the branch structure around
/// CheckUnevaluatedOperand looks incomplete); reconcile against revision
/// history before editing the logic.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                Expr *E,
                                SourceLocation RParenLoc) {
  // Tracks whether the operand ends up in a potentially-evaluated context
  // (polymorphic glvalue case); used for the side-effect warning below.
  bool WasEvaluated = false;
  if (E && !E->isTypeDependent()) {
    if (E->getType()->isPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(E);
      if (result.isInvalid()) return ExprError();
      E = result.get();

    QualType T = E->getType();
    if (const RecordType *RecordT = T->getAs<RecordType>()) {
      CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
      // C++ [expr.typeid]p3:
      //   [...] If the type of the expression is a class type, the class
      //   shall be completely-defined.
      if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
        return ExprError();

      // C++ [expr.typeid]p3:
      //   When typeid is applied to an expression other than an glvalue of a
      //   polymorphic class type [...] [the] expression is an unevaluated
      //   operand. [...]
      if (RecordD->isPolymorphic() && E->isGLValue()) {
        if (isUnevaluatedContext()) {
          // The operand was processed in unevaluated context, switch the
          // context and recheck the subexpression.
          ExprResult Result = TransformToPotentiallyEvaluated(E);
          if (Result.isInvalid())
            return ExprError();
          E = Result.get();

        // We require a vtable to query the type at run time.
        MarkVTableUsed(TypeidLoc, RecordD);
        WasEvaluated = true;

      ExprResult Result = CheckUnevaluatedOperand(E);
      if (Result.isInvalid())
        return ExprError();
      E = Result.get();

    // C++ [expr.typeid]p4:
    //   [...] If the type of the type-id is a reference to a possibly
    //   cv-qualified type, the result of the typeid expression refers to a
    //   std::type_info object representing the cv-unqualified referenced
    //   type.
    Qualifiers Quals;
    QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
    if (!Context.hasSameType(T, UnqualT)) {
      T = UnqualT;
      E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get();

  if (E->getType()->isVariablyModifiedType())
    return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
                     << E->getType());
  else if (!inTemplateInstantiation() &&
           E->HasSideEffects(Context, WasEvaluated)) {
    // The expression operand for typeid is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(E->getExprLoc(), WasEvaluated
                              ? diag::warn_side_effects_typeid
                              : diag::warn_side_effects_unevaluated_context);

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
                                     SourceRange(TypeidLoc, RParenLoc));
/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
/// Parse 'typeid( type-id )' or 'typeid ( expression )': locate
/// std::type_info, then delegate to the appropriate BuildCXXTypeId overload.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (the return type before 'Sema::ActOnCXXTypeid' is missing, the
/// GetTypeFromParser call below lost its '&TInfo' argument, and several
/// braces are never closed); reconcile against revision history before
/// editing the logic.
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                     bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
  // typeid is not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus) {
    return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
                     << "typeid");

  // Find the std::type_info type.
  if (!getStdNamespace())
    return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));

  // CXXTypeInfoDecl caches the lookup across calls.
  if (!CXXTypeInfoDecl) {
    IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
    LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
    LookupQualifiedName(R, getStdNamespace());
    CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    // Microsoft's typeinfo doesn't have type_info in std but in the global
    // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
      LookupQualifiedName(R, Context.getTranslationUnitDecl());
      CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();

    if (!CXXTypeInfoDecl)
      return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));

  if (!getLangOpts().RTTI) {
    return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));

  QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);

  if (isType) {
    // The operand is a type; handle it as such.
    TypeSourceInfo *TInfo = nullptr;
    QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
    if (T.isNull())
      return ExprError();

    if (!TInfo)
      TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);

    return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);

  // The operand is an expression.
  ExprResult Result =
      BuildCXXTypeId(TypeInfoType, OpLoc, (Expr *)TyOrExpr, RParenLoc);

  // Warn when -fno-rtti-data makes the result unreliable for
  // potentially-evaluated, non-most-derived operands.
  if (!getLangOpts().RTTIData && !Result.isInvalid())
    if (auto *CTE = dyn_cast<CXXTypeidExpr>(Result.get()))
      if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
        Diag(OpLoc, diag::warn_no_typeid_with_rtti_disabled)
            << (getDiagnostics().getDiagnosticOptions().getFormat() ==
  return Result;
/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
/// a single GUID.
/// NOTE(review): this helper appears to have lost physical lines in
/// extraction (the 'if (!TD)' guard and the UuidAttr branches below have no
/// bodies, and no brace is ever closed); reconcile against revision history
/// before editing. Comments below describe only the surviving code.
static void
getUuidAttrOfType(Sema &SemaRef, QualType QT,
                  llvm::SmallSetVector<const UuidAttr *, 1> &UuidAttrs) {
  // Optionally remove one level of pointer, reference or array indirection.
  const Type *Ty = QT.getTypePtr();
  if (QT->isPointerType() || QT->isReferenceType())
    Ty = QT->getPointeeType().getTypePtr();
  else if (QT->isArrayType())
    Ty = Ty->getBaseElementTypeUnsafe();

  const auto *TD = Ty->getAsTagDecl();
  if (!TD)

  // Check the most recent redeclaration for an attached uuid attribute.
  if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {

  // __uuidof can grab UUIDs from template arguments.
  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
    const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
    for (const TemplateArgument &TA : TAL.asArray()) {
      const UuidAttr *UuidForTA = nullptr;
      // Recurse into type and declaration template arguments.
      if (TA.getKind() == TemplateArgument::Type)
        getUuidAttrOfType(SemaRef, TA.getAsType(), UuidAttrs);
      else if (TA.getKind() == TemplateArgument::Declaration)
        getUuidAttrOfType(SemaRef, TA.getAsDecl()->getType(), UuidAttrs);

      if (UuidForTA)
/// Build a Microsoft __uuidof expression with a type operand.
/// Build a Microsoft __uuidof expression with a type operand.
///
/// \param Type the result type of the expression (the GUID type).
/// \param Operand the type operand; must resolve to exactly one
///        __declspec(uuid(...)) attribute unless dependent.
/// \returns the CXXUuidofExpr, or ExprError if zero or multiple GUIDs apply.
ExprResult Sema::BuildCXXUuidof(QualType Type,
                                SourceLocation TypeidLoc,
                                TypeSourceInfo *Operand,
                                SourceLocation RParenLoc) {
  MSGuidDecl *Guid = nullptr;
  if (!Operand->getType()->isDependentType()) {
    llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
    getUuidAttrOfType(*this, Operand->getType(), UuidAttrs);
    if (UuidAttrs.empty())
      return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
    if (UuidAttrs.size() > 1)
      return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
    Guid = UuidAttrs.back()->getGuidDecl();
  }

  return new (Context)
      CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
}
/// Build a Microsoft __uuidof expression with an expression operand.
/// Build a Microsoft __uuidof expression with an expression operand.
///
/// A null pointer constant operand yields the all-zero GUID; otherwise the
/// operand's type must carry exactly one __declspec(uuid(...)).
ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
                                Expr *E, SourceLocation RParenLoc) {
  MSGuidDecl *Guid = nullptr;
  if (!E->getType()->isDependentType()) {
    if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
      // A null pointer results in {00000000-0000-0000-0000-000000000000}.
      Guid = Context.getMSGuidDecl(MSGuidDecl::Parts{});
    } else {
      llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
      getUuidAttrOfType(*this, E->getType(), UuidAttrs);
      if (UuidAttrs.empty())
        return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
      if (UuidAttrs.size() > 1)
        return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
      Guid = UuidAttrs.back()->getGuidDecl();
    }
  }

  return new (Context)
      CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
}
/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
QualType GuidType = Context.getMSGuidType();
if (isType) {
// The operand is a type; handle it as such.
TypeSourceInfo *TInfo = nullptr;
QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
if (T.isNull())
return ExprError();
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
// The operand is an expression.
return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
"Unknown C++ Boolean value!");
return new (Context)
CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
/// ActOnCXXThrow - Parse throw expressions.
/// Act on a throw-expression: determine whether the thrown operand names an
/// automatic variable still in scope (enabling copy elision / NRVO-style
/// treatment), then delegate to BuildCXXThrow.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (the return type before 'Sema::ActOnCXXThrow' is missing, the scope-walk
/// loop is cut off mid-condition, and no brace is ever closed); reconcile
/// against revision history before editing the logic.
Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
  bool IsThrownVarInScope = false;
  if (Ex) {
    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or catch-
    //       clause parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
      if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
        if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) {
          // Walk outward from the current scope looking for the variable's
          // declaration scope.
          for( ; S; S = S->getParent()) {
            if (S->isDeclScope(Var)) {
              IsThrownVarInScope = true;

            if (S->getFlags() &
                (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
                 Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |

  return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
/// Build a CXXThrowExpr: diagnose disallowed contexts (no-exceptions builds,
/// CUDA device code, OpenMP simd regions), then check and initialize the
/// exception object from the operand.
///
/// NOTE(review): this block appears to have lost physical lines in extraction
/// (several '{' below are never closed); reconcile against revision history
/// before editing the logic.
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                               bool IsThrownVarInScope) {
  // Don't report an error if 'throw' is used in system headers.
  if (!getLangOpts().CXXExceptions &&
      !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
    // Delay error emission for the OpenMP device code.
    targetDiag(OpLoc, diag::err_exceptions_disabled) << "throw";

  // Exceptions aren't allowed in CUDA device code.
  if (getLangOpts().CUDA)
    CUDADiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
        << "throw" << CurrentCUDATarget();

  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";

  if (Ex && !Ex->isTypeDependent()) {
    // Initialize the exception result. This implicitly weeds out
    // abstract types or types with inaccessible copy constructors.

    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or
    //       catch-clause
    //       parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    NamedReturnInfo NRInfo =
        IsThrownVarInScope ? getNamedReturnInfo(Ex) : NamedReturnInfo();

    QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
    if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
      return ExprError();

    InitializedEntity Entity =
        InitializedEntity::InitializeException(OpLoc, ExceptionObjectTy);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Ex);
    if (Res.isInvalid())
      return ExprError();
    Ex = Res.get();

  // PPC MMA non-pointer types are not allowed as throw expr types.
  if (Ex && Context.getTargetInfo().getTriple().isPPC64())
    CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());

  return new (Context)
      CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
/// Recursively walk the bases of \p RD, counting how many times each base
/// subobject occurs (virtual bases count once) and recording which bases are
/// reachable through an all-public path. Used to compute the catchable-type
/// set for the MSVC ABI.
///
/// (Restores the 'else', the two guarded statement bodies, the recursive
/// call's final argument, and the closing braces dropped from the truncated
/// source.)
static void
collectPublicBases(CXXRecordDecl *RD,
                   llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
                   llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
                   llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
                   bool ParentIsPublic) {
  for (const CXXBaseSpecifier &BS : RD->bases()) {
    CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
    bool NewSubobject;
    // Virtual bases constitute the same subobject. Non-virtual bases are
    // always distinct subobjects.
    if (BS.isVirtual())
      NewSubobject = VBases.insert(BaseDecl).second;
    else
      NewSubobject = true;

    if (NewSubobject)
      ++SubobjectsSeen[BaseDecl];

    // Only add subobjects which have public access throughout the entire chain.
    bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
    if (PublicPath)
      PublicSubobjectsSeen.insert(BaseDecl);

    // Recurse on to each base subobject.
    collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
                       PublicPath);
  }
}
/// Collect into \p Objects the class itself plus every base subobject of
/// \p RD that is publicly accessible and occurs exactly once (unambiguous).
///
/// (Restores the truncated collectPublicBases call, the loop body, and the
/// closing braces dropped from the truncated source; the insert of RD itself
/// into PublicSubobjectsSeen is also restored — verify against revision
/// history.)
static void getUnambiguousPublicSubobjects(
    CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
  llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
  llvm::SmallSet<CXXRecordDecl *, 2> VBases;
  llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
  SubobjectsSeen[RD] = 1;
  PublicSubobjectsSeen.insert(RD);
  collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
                     /*ParentIsPublic=*/true);

  for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
    // Skip ambiguous objects.
    if (SubobjectsSeen[PublicSubobject] > 1)
      continue;

    Objects.push_back(PublicSubobject);
  }
}
/// CheckCXXThrowOperand - Validate the operand of a throw.
///
/// \param ThrowLoc location of the 'throw' keyword, used for diagnostics.
/// \param ExceptionObjectTy the (already adjusted) type of the exception
///        object that would be created.
/// \param E the throw operand expression.
/// \returns true on error (diagnostic already emitted), false otherwise.
///
/// NOTE(review): this extraction appears to have dropped physical lines
/// (closing braces, 'continue;' statements, trailing call arguments).
/// Comments below describe only the visible code — verify against the
/// complete upstream file before relying on them.
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
QualType ExceptionObjectTy, Expr *E) {
// If the type of the exception would be an incomplete type or a pointer
// to an incomplete type other than (cv) void the program is ill-formed.
QualType Ty = ExceptionObjectTy;
bool isPointer = false;
// Throwing a pointer: validate the pointee instead of the pointer itself.
if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
Ty = Ptr->getPointeeType();
isPointer = true;
// 'throw (void*)p' is allowed even though void is incomplete.
if (!isPointer || !Ty->isVoidType()) {
if (RequireCompleteType(ThrowLoc, Ty,
isPointer ? diag::err_throw_incomplete_ptr
: diag::err_throw_incomplete,
return true;
// Sizeless types (e.g. scalable vectors) cannot be thrown by value.
if (!isPointer && Ty->isSizelessType()) {
Diag(ThrowLoc, diag::err_throw_sizeless) << Ty << E->getSourceRange();
return true;
// Abstract class types cannot be created as exception objects.
if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
diag::err_throw_abstract_type, E))
return true;
// If the exception has class type, we need additional handling.
CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
if (!RD)
return false;
// If we are throwing a polymorphic class type or pointer thereof,
// exception handling will make use of the vtable.
MarkVTableUsed(ThrowLoc, RD);
// If a pointer is thrown, the referenced object will not be destroyed.
if (isPointer)
return false;
// If the class has a destructor, we must be able to call it.
if (!RD->hasIrrelevantDestructor()) {
if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
MarkFunctionReferenced(E->getExprLoc(), Destructor);
CheckDestructorAccess(E->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_exception) << Ty);
if (DiagnoseUseOfDecl(Destructor, E->getExprLoc()))
return true;
// The MSVC ABI creates a list of all types which can catch the exception
// object. This list also references the appropriate copy constructor to call
// if the object is caught by value and has a non-trivial copy constructor.
if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// We are only interested in the public, unambiguous bases contained within
// the exception object. Bases which are ambiguous or otherwise
// inaccessible are not catchable types.
llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects);
for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
// Attempt to lookup the copy constructor. Various pieces of machinery
// will spring into action, like template instantiation, which means this
// cannot be a simple walk of the class's decls. Instead, we must perform
// lookup and overload resolution.
CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0);
if (!CD || CD->isDeleted())
// Mark the constructor referenced as it is used by this throw expression.
MarkFunctionReferenced(E->getExprLoc(), CD);
// Skip this copy constructor if it is trivial, we don't need to record it
// in the catchable type data.
if (CD->isTrivial())
// The copy constructor is non-trivial, create a mapping from this class
// type to this constructor.
// N.B. The selection of copy constructor is not sensitive to this
// particular throw-site. Lookup will be performed at the catch-site to
// ensure that the copy constructor is, in fact, accessible (via
// friendship or any other means).
Context.addCopyConstructorForExceptionObject(Subobject, CD);
// We don't keep the instantiated default argument expressions around so
// we must rebuild them here.
// Start at I = 1: parameter 0 is the object being copied from.
for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
if (CheckCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)))
return true;
// Under the Itanium C++ ABI, memory for the exception object is allocated by
// the runtime with no ability for the compiler to request additional
// alignment. Warn if the exception type requires alignment beyond the minimum
// guaranteed by the target C++ runtime.
if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
CharUnits TypeAlign = Context.getTypeAlignInChars(Ty);
CharUnits ExnObjAlign = Context.getExnObjectAlignment();
if (ExnObjAlign < TypeAlign) {
Diag(ThrowLoc, diag::warn_throw_underaligned_obj);
Diag(ThrowLoc, diag::note_throw_underaligned_obj)
<< Ty << (unsigned)TypeAlign.getQuantity()
<< (unsigned)ExnObjAlign.getQuantity();
return false;
/// Compute the type of 'this' inside a lambda, adjusting cv-qualifiers to
/// account for any enclosing lambda that captures '*this' by copy (a const
/// call operator capturing '*this' by value makes the captured object const).
///
/// \param FunctionScopes the Sema function-scope stack (innermost last).
/// \param ThisTy the unadjusted 'this' pointer type.
/// \param CurSemaContext the current semantic DeclContext.
/// \param ASTCtx the ASTContext used to build the resulting pointer type.
///
/// NOTE(review): this extraction appears to have dropped physical lines
/// (loop-condition tails, 'continue;', closing braces); comments describe
/// only the visible code — verify against the complete upstream file.
static QualType adjustCVQualifiersForCXXThisWithinLambda(
ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
DeclContext *CurSemaContext, ASTContext &ASTCtx) {
QualType ClassType = ThisTy->getPointeeType();
LambdaScopeInfo *CurLSI = nullptr;
DeclContext *CurDC = CurSemaContext;
// Iterate through the stack of lambdas starting from the innermost lambda to
// the outermost lambda, checking if '*this' is ever captured by copy - since
// that could change the cv-qualifiers of the '*this' object.
// The object referred to by '*this' starts out with the cv-qualifiers of its
// member function. We then start with the innermost lambda and iterate
// outward checking to see if any lambda performs a by-copy capture of '*this'
// - and if so, any nested lambda must respect the 'constness' of that
// capturing lamdbda's call operator.
// Since the FunctionScopeInfo stack is representative of the lexical
// nesting of the lambda expressions during initial parsing (and is the best
// place for querying information about captures about lambdas that are
// partially processed) and perhaps during instantiation of function templates
// that contain lambda expressions that need to be transformed BUT not
// necessarily during instantiation of a nested generic lambda's function call
// operator (which might even be instantiated at the end of the TU) - at which
// time the DeclContext tree is mature enough to query capture information
// reliably - we use a two pronged approach to walk through all the lexically
// enclosing lambda expressions:
// 1) Climb down the FunctionScopeInfo stack as long as each item represents
// a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
// enclosed by the call-operator of the LSI below it on the stack (while
// tracking the enclosing DC for step 2 if needed). Note the topmost LSI on
// the stack represents the innermost lambda.
// 2) If we run out of enclosing LSI's, check if the enclosing DeclContext
// represents a lambda's call operator. If it does, we must be instantiating
// a generic lambda's call operator (represented by the Current LSI, and
// should be the only scenario where an inconsistency between the LSI and the
// DeclContext should occur), so climb out the DeclContexts if they
// represent lambdas, while querying the corresponding closure types
// regarding capture information.
// 1) Climb down the function scope info stack.
for (int I = FunctionScopes.size();
I-- && isa<LambdaScopeInfo>(FunctionScopes[I]) &&
(!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
CurDC = getLambdaAwareParentOfDeclContext(CurDC)) {
CurLSI = cast<LambdaScopeInfo>(FunctionScopes[I]);
if (!CurLSI->isCXXThisCaptured())
auto C = CurLSI->getCXXThisCapture();
if (C.isCopyCapture()) {
// A const call operator capturing '*this' by copy makes the captured
// object const for all nested lambdas.
if (CurLSI->CallOperator->isConst())
return ASTCtx.getPointerType(ClassType);
// 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
// can happen during instantiation of its nested generic lambda call
// operator); 2. if we're in a lambda scope (lambda body).
if (CurLSI && isLambdaCallOperator(CurDC)) {
assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
"While computing 'this' capture-type for a generic lambda, when we "
"run out of enclosing LSI's, yet the enclosing DC is a "
"lambda-call-operator we must be (i.e. Current LSI) in a generic "
"lambda call oeprator");
assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));
// Query a closure type for whether (and how) it captures 'this'.
auto IsThisCaptured =
[](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
IsConst = false;
IsByCopy = false;
for (auto &&C : Closure->captures()) {
if (C.capturesThis()) {
if (C.getCaptureKind() == LCK_StarThis)
IsByCopy = true;
if (Closure->getLambdaCallOperator()->isConst())
IsConst = true;
return true;
return false;
bool IsByCopyCapture = false;
bool IsConstCapture = false;
CXXRecordDecl *Closure = cast<CXXRecordDecl>(CurDC->getParent());
// Walk outward through enclosing closure types while they capture 'this'.
while (Closure &&
IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
if (IsByCopyCapture) {
if (IsConstCapture)
return ASTCtx.getPointerType(ClassType);
Closure = isLambdaCallOperator(Closure->getParent())
? cast<CXXRecordDecl>(Closure->getParent()->getParent())
: nullptr;
return ASTCtx.getPointerType(ClassType);
/// Return the type of 'this' at the current point of translation, or a null
/// QualType when 'this' is not available. Handles the CXXThisTypeOverride,
/// instance methods, lambdas instantiated inside default member initializers,
/// and cv-adjustment for '*this' captured by copy in enclosing lambdas.
///
/// NOTE(review): this extraction appears to have dropped closing braces;
/// comments describe only the visible code.
QualType Sema::getCurrentThisType() {
DeclContext *DC = getFunctionLevelDeclContext();
// Start from an explicit override (set e.g. while parsing a member
// declarator via CXXThisScopeRAII below), if one is active.
QualType ThisTy = CXXThisTypeOverride;
if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
if (method && method->isInstance())
ThisTy = method->getThisType();
if (ThisTy.isNull() && isLambdaCallOperator(CurContext) &&
inTemplateInstantiation() && isa<CXXRecordDecl>(DC)) {
// This is a lambda call operator that is being instantiated as a default
// initializer. DC must point to the enclosing class type, so we can recover
// the 'this' type from it.
QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
// There are no cv-qualifiers for 'this' within default initializers,
// per [expr.prim.general]p4.
ThisTy = Context.getPointerType(ClassTy);
// If we are within a lambda's call operator, the cv-qualifiers of 'this'
// might need to be adjusted if the lambda or any of its enclosing lambda's
// captures '*this' by copy.
if (!ThisTy.isNull() && isLambdaCallOperator(CurContext))
return adjustCVQualifiersForCXXThisWithinLambda(FunctionScopes, ThisTy,
CurContext, Context);
return ThisTy;
/// RAII constructor: temporarily override the type of 'this' while inside
/// \p ContextDecl (a class or class template), qualified by
/// \p CXXThisTypeQuals. Saves the previous override so the destructor can
/// restore it. Does nothing when \p Enabled is false or there is no context.
///
/// NOTE(review): this extraction appears to have dropped lines here (the
/// opening '{', an early 'return;', and an 'else' before the cast at the
/// Record assignment) — verify against the complete upstream file.
Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
Decl *ContextDecl,
Qualifiers CXXThisTypeQuals,
bool Enabled)
: S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
if (!Enabled || !ContextDecl)
CXXRecordDecl *Record = nullptr;
if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(ContextDecl))
Record = Template->getTemplatedDecl();
Record = cast<CXXRecordDecl>(ContextDecl);
QualType T = S.Context.getRecordType(Record);
T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
S.CXXThisTypeOverride = S.Context.getPointerType(T);
this->Enabled = true;
/// RAII destructor: restore the 'this' type override that was saved by the
/// constructor, but only if the scope actually installed one (Enabled).
Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
  if (Enabled) {
    S.CXXThisTypeOverride = OldCXXThisTypeOverride;
  }
}
/// Emit a note with a fix-it hint suggesting an explicit 'this' capture in a
/// lambda's capture list (inserted at the end of the introducer), for the
/// case where 'this' was not implicitly capturable.
///
/// NOTE(review): this extraction appears to have dropped the tail of the
/// 'if' condition and surrounding braces — verify against the upstream file.
static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
// [=, this] {}; // until C++20: Error: this when = is the default
if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
Sema.Diag(DiagLoc, diag::note_lambda_this_capture_fixit)
<< FixItHint::CreateInsertion(
DiagLoc, LSI->NumExplicitCaptures > 0 ? ", this" : "this");
/// Check whether 'this' (the enclosing object) can be captured at the current
/// point, diagnosing failures, and — when \p BuildAndDiagnose is true —
/// record the capture in every closure scope that needs it.
///
/// \param Loc location of the use of 'this', for diagnostics.
/// \param Explicit true for an explicit capture ('this' / '*this' in the
///        introducer) rather than an implicit use.
/// \param BuildAndDiagnose when false, only answer "would this work?"
///        without emitting diagnostics or mutating capture lists.
/// \param FunctionScopeIndexToStopAt optional index of the outermost closure
///        scope to consider (defaults to the innermost scope).
/// \param ByCopy true for a '[*this]' by-value capture (lambdas only).
/// \returns true if the capture is (or would be) invalid.
///
/// NOTE(review): this extraction appears to have dropped physical lines
/// ('return false;'/'break;' statements, 'continue', closing braces, and
/// the increment of NumCapturingClosures); comments describe only the
/// visible code — verify against the complete upstream file.
bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
const bool ByCopy) {
// We don't need to capture this in an unevaluated context.
if (isUnevaluatedContext() && !Explicit)
return true;
assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
? *FunctionScopeIndexToStopAt
: FunctionScopes.size() - 1;
// Check that we can capture the *enclosing object* (referred to by '*this')
// by the capturing-entity/closure (lambda/block/etc) at
// MaxFunctionScopesIndex-deep on the FunctionScopes stack.
// Note: The *enclosing object* can only be captured by-value by a
// closure that is a lambda, using the explicit notation:
// [*this] { ... }.
// Every other capture of the *enclosing object* results in its by-reference
// capture.
// For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
// stack), we can capture the *enclosing object* only if:
// - 'L' has an explicit byref or byval capture of the *enclosing object*
// - or, 'L' has an implicit capture.
// AND
// -- there is no enclosing closure
// -- or, there is some enclosing closure 'E' that has already captured the
// *enclosing object*, and every intervening closure (if any) between 'E'
// and 'L' can implicitly capture the *enclosing object*.
// -- or, every enclosing closure can implicitly capture the
// *enclosing object*
unsigned NumCapturingClosures = 0;
// Walk outward from the innermost (or requested) scope, validating each
// closure's ability to capture 'this'.
for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
if (CapturingScopeInfo *CSI =
dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
if (CSI->CXXThisCaptureIndex != 0) {
// 'this' is already being captured; there isn't anything more to do.
CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(BuildAndDiagnose);
LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI);
if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) {
// This context can't implicitly capture 'this'; fail out.
if (BuildAndDiagnose) {
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
if (!Explicit)
buildLambdaThisCaptureFixit(*this, LSI);
return true;
if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
(Explicit && idx == MaxFunctionScopesIndex)) {
// Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
// iteration through can be an explicit capture, all enclosing closures,
// if any, must perform implicit captures.
// This closure can capture 'this'; continue looking upwards.
// This context can't implicitly capture 'this'; fail out.
if (BuildAndDiagnose)
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
if (!Explicit)
buildLambdaThisCaptureFixit(*this, LSI);
return true;
if (!BuildAndDiagnose) return false;
// If we got here, then the closure at MaxFunctionScopesIndex on the
// FunctionScopes stack, can capture the *enclosing object*, so capture it
// (including implicit by-reference captures in any enclosing closures).
// In the loop below, respect the ByCopy flag only for the closure requesting
// the capture (i.e. first iteration through the loop below). Ignore it for
// all enclosing closure's up to NumCapturingClosures (since they must be
// implicitly capturing the *enclosing object* by reference (see loop
// above)).
assert((!ByCopy ||
dyn_cast<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
"Only a lambda can capture the enclosing object (referred to by "
"*this) by copy");
QualType ThisTy = getCurrentThisType();
// Record the capture in each scope, innermost first.
for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
--idx, --NumCapturingClosures) {
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
// The type of the corresponding data member (not a 'this' pointer if 'by
// copy').
QualType CaptureType = ThisTy;
if (ByCopy) {
// If we are capturing the object referred to by '*this' by copy, ignore
// any cv qualifiers inherited from the type of the member function for
// the type of the closure-type's corresponding data member and any use
// of 'this'.
CaptureType = ThisTy->getPointeeType();
bool isNested = NumCapturingClosures > 1;
CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
return false;
/// Handle a use of the 'this' keyword.
///
/// \param Loc the location of 'this'.
/// \returns the built CXXThisExpr, or an error if 'this' is not usable here
/// (getCurrentThisType() returned a null type).
ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
  /// C++ 9.3.2: In the body of a non-static member function, the keyword this
  /// is a non-lvalue expression whose value is the address of the object for
  /// which the function is called.
  QualType ThisTy = getCurrentThisType();
  if (ThisTy.isNull())
    return Diag(Loc, diag::err_invalid_this_use);
  return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
}
/// Build a CXXThisExpr of the given type and mark it referenced (which runs
/// the 'this' capture machinery — see MarkThisReferenced below).
///
/// \param Loc source location of the (possibly implicit) 'this'.
/// \param Type the pointer type of 'this' at this point.
/// \param IsImplicit true when 'this' was not written in the source.
///
/// NOTE(review): the original extraction showed the expression being
/// returned without the MarkThisReferenced call; the call is restored here
/// since the helper is declared immediately below — confirm against the
/// complete upstream file.
Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
                             bool IsImplicit) {
  auto *This = new (Context) CXXThisExpr(Loc, Type, IsImplicit);
  MarkThisReferenced(This);
  return This;
}
void Sema::MarkThisReferenced(CXXThisExpr *This) {
/// Determine whether a use of 'this' of type \p BaseType occurs outside a
/// member function body but inside a class that is currently being defined
/// (e.g. in a trailing return type or default member initializer), where an
/// active CXXThisTypeOverride supplies the 'this' type.
bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
  // If we're outside the body of a member function, then we'll have a specified
  // type for 'this'.
  if (CXXThisTypeOverride.isNull())
    return false;

  // Determine whether we're looking into a class that's currently being
  // defined.
  CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
  return Class && Class->isBeingDefined();
}
/// Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
///
/// Converts the parsed type, delegates to BuildCXXTypeConstructExpr, and
/// then either corrects delayed typos (dependent results) or wraps an
/// invalid result in a RecoveryExpr.
///
/// NOTE(review): the return-type line (presumably 'ExprResult') preceding
/// the qualified name, plus some braces, appear to have been dropped by
/// this extraction.
Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization) {
if (!TypeRep)
return ExprError();
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
auto Result = BuildCXXTypeConstructExpr(TInfo, LParenOrBraceLoc, exprs,
RParenOrBraceLoc, ListInitialization);
// Avoid creating a non-type-dependent expression that contains typos.
// Non-type-dependent expressions are liable to be discarded without
// checking for embedded typos.
if (!Result.isInvalid() && Result.get()->isInstantiationDependent() &&
Result = CorrectDelayedTyposInExpr(Result.get());
else if (Result.isInvalid())
Result = CreateRecoveryExpr(TInfo->getTypeLoc().getBeginLoc(),
RParenOrBraceLoc, exprs, Ty);
return Result;
/// Build a C++ functional-cast / type-construction expression T(...), T{...}
/// or T(): performs CTAD when T is a deduced class template, defers when the
/// type or arguments are dependent, lowers a single-parenthesized-argument
/// form to a functional cast, validates the type (no arrays for T(), no
/// function types, completeness), and finally runs an initialization
/// sequence, wrapping the result in a CXXFunctionalCastExpr when needed.
///
/// NOTE(review): this extraction appears to have dropped physical lines
/// (the 'ExprResult' return-type line, condition tails, closing braces and
/// some trailing call arguments); comments describe only the visible code.
Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization) {
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
assert((!ListInitialization ||
(Exprs.size() == 1 && isa<InitListExpr>(Exprs[0]))) &&
"List initialization must have initializer list as expression.");
SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(Context, TInfo);
// Choose direct-list, direct, or value initialization based on the syntax.
InitializationKind Kind =
? ListInitialization
? InitializationKind::CreateDirectList(
TyBeginLoc, LParenOrBraceLoc, RParenOrBraceLoc)
: InitializationKind::CreateDirect(TyBeginLoc, LParenOrBraceLoc,
: InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
// C++1z [expr.type.conv]p1:
// If the type is a placeholder for a deduced class type, [...perform class
// template argument deduction...]
DeducedType *Deduced = Ty->getContainedDeducedType();
if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
Kind, Exprs);
if (Ty.isNull())
return ExprError();
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
// FIXME: CXXUnresolvedConstructExpr does not model list-initialization
// directly. We work around this by dropping the locations of the braces.
SourceRange Locs = ListInitialization
? SourceRange()
: SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
return CXXUnresolvedConstructExpr::Create(Context, Ty.getNonReferenceType(),
TInfo, Locs.getBegin(), Exprs,
// C++ [expr.type.conv]p1:
// If the expression list is a parenthesized single expression, the type
// conversion expression is equivalent (in definedness, and if defined in
// meaning) to the corresponding cast expression.
if (Exprs.size() == 1 && !ListInitialization &&
!isa<InitListExpr>(Exprs[0])) {
Expr *Arg = Exprs[0];
return BuildCXXFunctionalCastExpr(TInfo, Ty, LParenOrBraceLoc, Arg,
// For an expression of the form T(), T shall not be an array type.
QualType ElemTy = Ty;
if (Ty->isArrayType()) {
if (!ListInitialization)
return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_array_type)
<< FullRange);
ElemTy = Context.getBaseElementType(Ty);
// Only construct objects with object types.
// There doesn't seem to be an explicit rule for this but functions are
// not objects, so they cannot take initializers.
if (Ty->isFunctionType())
return ExprError(Diag(TyBeginLoc, diag::err_init_for_function_type)
<< Ty << FullRange);
// C++17 [expr.type.conv]p2:
// If the type is cv void and the initializer is (), the expression is a
// prvalue of the specified type that performs no initialization.
if (!Ty->isVoidType() &&
RequireCompleteType(TyBeginLoc, ElemTy,
diag::err_invalid_incomplete_type_use, FullRange))
return ExprError();
// Otherwise, the expression is a prvalue of the specified type whose
// result object is direct-initialized (11.6) with the initializer.
InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs);
if (Result.isInvalid())
return Result;
Expr *Inner = Result.get();
if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
Inner = BTE->getSubExpr();
if (!isa<CXXTemporaryObjectExpr>(Inner) &&
!isa<CXXScalarValueInitExpr>(Inner)) {
// If we created a CXXTemporaryObjectExpr, that node also represents the
// functional cast. Otherwise, create an explicit cast to represent
// the syntactic form of a functional-style cast that was used here.
// FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
// would give a more consistent AST representation than using a
// CXXTemporaryObjectExpr. It's also weird that the functional cast
// is sometimes handled by initialization and sometimes not.
QualType ResultType = Result.get()->getType();
SourceRange Locs = ListInitialization
? SourceRange()
: SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
Result = CXXFunctionalCastExpr::Create(
Context, ResultType, Expr::getValueKindForType(Ty), TInfo, CK_NoOp,
Result.get(), /*Path=*/nullptr, CurFPFeatureOverrides(),
Locs.getBegin(), Locs.getEnd());
return Result;
/// Determine whether \p Method is a "usual" deallocation function
/// ([basic.stc.dynamic.deallocation]), with CUDA-specific refinements:
/// a function the current caller cannot invoke is rejected, and a
/// "wrong-side" candidate is only accepted when no better-side alternative
/// exists.
///
/// NOTE(review): this extraction appears to have dropped physical lines
/// (the lookup initializer at the 'DeclContext::lookup_result R =' line,
/// closing braces/'});'); comments describe only the visible code.
bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
// [CUDA] Ignore this function, if we can't call it.
const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
if (getLangOpts().CUDA) {
auto CallPreference = IdentifyCUDAPreference(Caller, Method);
// If it's not callable at all, it's not the right function.
if (CallPreference < CFP_WrongSide)
return false;
if (CallPreference == CFP_WrongSide) {
// Maybe. We have to check if there are better alternatives.
DeclContext::lookup_result R =
for (const auto *D : R) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (IdentifyCUDAPreference(Caller, FD) > CFP_WrongSide)
return false;
// We've found no better variants.
SmallVector<const FunctionDecl*, 4> PreventedBy;
bool Result = Method->isUsualDeallocationFunction(PreventedBy);
if (Result || !getLangOpts().CUDA || PreventedBy.empty())
return Result;
// In case of CUDA, return true if none of the 1-argument deallocator
// functions are actually callable.
return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) {
assert(FD->getNumParams() == 1 &&
"Only single-operand functions should be in PreventedBy");
return IdentifyCUDAPreference(Caller, FD) >= CFP_HostDevice;
/// Determine whether the given function is a non-placement
/// deallocation function.
///
/// For member functions this defers to Sema::isUsualDeallocationFunction;
/// for free functions it requires operator delete / operator delete[] whose
/// parameters beyond the first are only the optional size_t and
/// std::align_val_t parameters.
///
/// NOTE(review): the condition tails (and '++UsualParams;' bodies) of the
/// two trailing 'if' statements appear to have been dropped by this
/// extraction.
static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
return S.isUsualDeallocationFunction(Method);
if (FD->getOverloadedOperator() != OO_Delete &&
FD->getOverloadedOperator() != OO_Array_Delete)
return false;
unsigned UsualParams = 1;
if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
return UsualParams == FD->getNumParams();
namespace {
/// Describes a candidate "usual" deallocation function found by lookup:
/// whether it is a destroying delete, whether it takes the optional size_t
/// and std::align_val_t parameters, and how preferable it is under CUDA.
/// Used by resolveDeallocationOverload to pick the best candidate.
///
/// NOTE(review): this extraction appears to have dropped lines in the
/// constructor ('return;', condition tails such as the hasSameUnqualifiedType
/// call, '++NumBaseParams;', closing braces) and the struct/namespace
/// terminators; comments describe only the visible code.
struct UsualDeallocFnInfo {
UsualDeallocFnInfo() : Found(), FD(nullptr) {}
UsualDeallocFnInfo(Sema &S, DeclAccessPair Found)
: Found(Found), FD(dyn_cast<FunctionDecl>(Found->getUnderlyingDecl())),
Destroying(false), HasSizeT(false), HasAlignValT(false),
CUDAPref(Sema::CFP_Native) {
// A function template declaration is never a usual deallocation function.
if (!FD)
unsigned NumBaseParams = 1;
if (FD->isDestroyingOperatorDelete()) {
Destroying = true;
if (NumBaseParams < FD->getNumParams() &&
S.Context.getSizeType())) {
HasSizeT = true;
if (NumBaseParams < FD->getNumParams() &&
FD->getParamDecl(NumBaseParams)->getType()->isAlignValT()) {
HasAlignValT = true;
// In CUDA, determine how much we'd like / dislike to call this.
if (S.getLangOpts().CUDA)
if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
// True when this describes a real (non-template) function.
explicit operator bool() const { return FD; }
bool isBetterThan(const UsualDeallocFnInfo &Other, bool WantSize,
bool WantAlign) const {
// C++ P0722:
// A destroying operator delete is preferred over a non-destroying
// operator delete.
if (Destroying != Other.Destroying)
return Destroying;
// C++17 [expr.delete]p10:
// If the type has new-extended alignment, a function with a parameter
// of type std::align_val_t is preferred; otherwise a function without
// such a parameter is preferred
if (HasAlignValT != Other.HasAlignValT)
return HasAlignValT == WantAlign;
if (HasSizeT != Other.HasSizeT)
return HasSizeT == WantSize;
// Use CUDA call preference as a tiebreaker.
return CUDAPref > Other.CUDAPref;
DeclAccessPair Found;
FunctionDecl *FD;
bool Destroying, HasSizeT, HasAlignValT;
Sema::CUDAFunctionPreference CUDAPref;
/// Determine whether a type has new-extended alignment. This may be called when
/// the type is incomplete (for a delete-expression with an incomplete pointee
/// type), in which case it will conservatively return false if the alignment is
/// not known.
static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
  // Extended alignment only matters when aligned allocation is enabled, and
  // means "stricter than the target's default operator-new alignment".
  return S.getLangOpts().AlignedAllocation &&
         S.getASTContext().getTypeAlignIfKnown(AllocType) >
             S.getASTContext().getTargetInfo().getNewAlign();
}
/// Select the correct "usual" deallocation function to use from a selection of
/// deallocation functions (either global or class-scope).
///
/// \param WantSize prefer candidates taking a size_t parameter.
/// \param WantAlign prefer candidates taking std::align_val_t.
/// \param BestFns if non-null, collects all equally-best candidates (used
///        for ambiguity diagnostics).
/// \returns the best candidate, or an empty info if none qualifies.
///
/// NOTE(review): this extraction appears to have dropped lines ('continue;',
/// push_back/clear calls on BestFns, closing braces); comments describe
/// only the visible code.
static UsualDeallocFnInfo resolveDeallocationOverload(
Sema &S, LookupResult &R, bool WantSize, bool WantAlign,
llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
UsualDeallocFnInfo Best;
for (auto I = R.begin(), E = R.end(); I != E; ++I) {
UsualDeallocFnInfo Info(S, I.getPair());
// Skip templates, placement forms, and CUDA-uncallable candidates.
if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) ||
Info.CUDAPref == Sema::CFP_Never)
if (!Best) {
Best = Info;
if (BestFns)
if (Best.isBetterThan(Info, WantSize, WantAlign))
// If more than one preferred function is found, all non-preferred
// functions are eliminated from further consideration.
if (BestFns && Info.isBetterThan(Best, WantSize, WantAlign))
Best = Info;
if (BestFns)
return Best;
/// Determine whether a given type is a class for which 'delete[]' would call
/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
/// we need to store the array size (even if the type is
/// trivially-destructible).
///
/// NOTE(review): this extraction appears to have dropped the initializers of
/// the 'record' and 'deleteName' declarations and some braces; comments
/// describe only the visible code.
static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
QualType allocType) {
const RecordType *record =
if (!record) return false;
// Try to find an operator delete[] in class scope.
DeclarationName deleteName =
LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
S.LookupQualifiedName(ops, record->getDecl());
// We're just doing this for information.
// Very likely: there's no operator delete[].
if (ops.empty()) return false;
// If it's ambiguous, it should be illegal to call operator delete[]
// on this thing, so it doesn't matter if we allocate extra space or not.
if (ops.isAmbiguous()) return false;
// C++17 [expr.delete]p10:
// If the deallocation functions have class scope, the one without a
// parameter of type std::size_t is selected.
auto Best = resolveDeallocationOverload(
S, ops, /*WantSize*/false,
/*WantAlign*/hasNewExtendedAlignment(S, allocType));
return Best && Best.HasSizeT;
/// Parsed a C++ 'new' expression (C++ 5.3.4).
/// E.g.:
/// @code new (memory) int[size][4] @endcode
/// or
/// @code ::new Foo(23, "hello") @endcode
/// \param StartLoc The first location of the expression.
/// \param UseGlobal True if 'new' was prefixed with '::'.
/// \param PlacementLParen Opening paren of the placement arguments.
/// \param PlacementArgs Placement new arguments.
/// \param PlacementRParen Closing paren of the placement arguments.
/// \param TypeIdParens If the type is in parens, the source range.
/// \param D The type to be allocated, as well as array dimensions.
/// \param Initializer The initializing expression or initializer-list, or null
/// if there is none.
///
/// NOTE(review): this extraction appears to have dropped lines (the
/// 'ExprResult' return-type line, 'continue;', assignment tails, closing
/// braces, and the trailing Initializer argument of BuildCXXNew); comments
/// describe only the visible code.
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
SourceLocation PlacementRParen, SourceRange TypeIdParens,
Declarator &D, Expr *Initializer) {
Optional<Expr *> ArraySize;
// If the specified type is an array, unwrap it and save the expression.
if (D.getNumTypeObjects() > 0 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
DeclaratorChunk &Chunk = D.getTypeObject(0);
if (D.getDeclSpec().hasAutoTypeSpec())
return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
<< D.getSourceRange());
if (Chunk.Arr.hasStatic)
return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
<< D.getSourceRange());
if (!Chunk.Arr.NumElts && !Initializer)
return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
<< D.getSourceRange());
ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
// Every dimension shall be of constant size.
if (ArraySize) {
for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
if (D.getTypeObject(I).Kind != DeclaratorChunk::Array)
DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
if (Expr *NumElts = (Expr *)Array.NumElts) {
if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
// FIXME: GCC permits constant folding here. We should either do so consistently
// or not do so at all, rather than changing behavior in C++14 onwards.
if (getLangOpts().CPlusPlus14) {
// C++1y []p6: Every constant-expression in a noptr-new-declarator
// shall be a converted constant expression (5.19) of type std::size_t
// and shall evaluate to a strictly positive value.
llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
= CheckConvertedConstantExpression(NumElts, Context.getSizeType(), Value,
} else {
Array.NumElts =
NumElts, nullptr, diag::err_new_array_nonconst, AllowFold)
if (!Array.NumElts)
return ExprError();
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/nullptr);
QualType AllocType = TInfo->getType();
if (D.isInvalidType())
return ExprError();
SourceRange DirectInitRange;
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
DirectInitRange = List->getSourceRange();
return BuildCXXNew(SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
PlacementLParen, PlacementArgs, PlacementRParen,
TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange,
/// Determine whether \p Init is an initializer form that is legal for an
/// array new-expression: none, an empty paren list, an implicit
/// value-initialization, certain CXXConstructExprs, or (for list-init style)
/// an InitListExpr.
///
/// NOTE(review): the tail of the CXXConstructExpr condition and the closing
/// brace appear to have been dropped by this extraction.
static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
Expr *Init) {
if (!Init)
return true;
if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
return PLE->getNumExprs() == 0;
if (isa<ImplicitValueInitExpr>(Init))
return true;
else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
return !CCE->isListInitialization() &&
else if (Style == CXXNewExpr::ListInit) {
assert(isa<InitListExpr>(Init) &&
"Shouldn't create list CXXConstructExprs for arrays.");
return true;
return false;
/// Determine whether \p FD is an aligned allocation/deallocation function
/// that is unavailable on the deployment target (the library doesn't provide
/// it and the user hasn't defined it).
///
/// NOTE(review): the 'bool' return-type line and the tail of the
/// isReplaceableGlobalAllocationFunction condition appear to have been
/// dropped by this extraction.
Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
if (!getLangOpts().AlignedAllocationUnavailable)
return false;
// A user-provided definition makes the function available regardless.
if (FD.isDefined())
return false;
Optional<unsigned> AlignmentParam;
if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
return true;
return false;
// Emit a diagnostic if an aligned allocation/deallocation function that is not
// implemented in the standard library is selected.
//
// Reports the OS name and the minimum OS version that ships the aligned
// forms, plus a note on how to silence the error.
//
// NOTE(review): the argument tail of getPlatformNameSourceSpelling and
// closing braces appear to have been dropped by this extraction.
void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc) {
if (isUnavailableAlignedAllocationFunction(FD)) {
const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
VersionTuple OSVersion = alignedAllocMinVersion(T.getOS());
OverloadedOperatorKind Kind = FD.getDeclName().getCXXOverloadedOperator();
bool IsDelete = Kind == OO_Delete || Kind == OO_Array_Delete;
Diag(Loc, diag::err_aligned_allocation_unavailable)
<< IsDelete << FD.getType().getAsString() << OSName
<< OSVersion.getAsString() << OSVersion.empty();
Diag(Loc, diag::note_silence_aligned_allocation_unavailable);
Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer) {
SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
SourceLocation StartLoc = Range.getBegin();
CXXNewExpr::InitializationStyle initStyle;
if (DirectInitRange.isValid()) {
assert(Initializer && "Have parens but no initializer.");
initStyle = CXXNewExpr::CallInit;
} else if (Initializer && isa<InitListExpr>(Initializer))
initStyle = CXXNewExpr::ListInit;
else {
assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
isa<CXXConstructExpr>(Initializer)) &&
"Initializer expression that cannot have been implicitly created.");
initStyle = CXXNewExpr::NoInit;
Expr **Inits = &Initializer;
unsigned NumInits = Initializer ? 1 : 0;
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init");
Inits = List->getExprs();
NumInits = List->getNumExprs();
// C++11 []p15:
// A new-expression that creates an object of type T initializes that
// object as follows:
InitializationKind Kind
// - If the new-initializer is omitted, the object is default-
// initialized (8.5); if no initialization is performed,
// the object has indeterminate value
= initStyle == CXXNewExpr::NoInit
? InitializationKind::CreateDefault(TypeRange.getBegin())
// - Otherwise, the new-initializer is interpreted according to
// the
// initialization rules of 8.5 for direct-initialization.
: initStyle == CXXNewExpr::ListInit
? InitializationKind::CreateDirectList(
TypeRange.getBegin(), Initializer->getBeginLoc(),
: InitializationKind::CreateDirect(TypeRange.getBegin(),
// C++11 []p6. Deduce the type which 'auto' stands in for.
auto *Deduced = AllocType->getContainedDeducedType();
if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
if (ArraySize)
return ExprError(
Diag(*ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
<< /*array*/ 2
<< (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, AllocType);
AllocType = DeduceTemplateSpecializationFromInitializer(
AllocTypeInfo, Entity, Kind, MultiExprArg(Inits, NumInits));
if (AllocType.isNull())
return ExprError();
} else if (Deduced) {
bool Braced = (initStyle == CXXNewExpr::ListInit);
if (NumInits == 1) {
if (auto p = dyn_cast_or_null<InitListExpr>(Inits[0])) {
Inits = p->getInits();
NumInits = p->getNumInits();
Braced = true;
if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
<< AllocType << TypeRange);
if (NumInits > 1) {
Expr *FirstBad = Inits[1];
return ExprError(Diag(FirstBad->getBeginLoc(),
<< AllocType << TypeRange);
if (Braced && !getLangOpts().CPlusPlus17)
Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
<< AllocType << TypeRange;
Expr *Deduce = Inits[0];
QualType DeducedType;
if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
<< AllocType << Deduce->getType()
<< TypeRange << Deduce->getSourceRange());
if (DeducedType.isNull())
return ExprError();
AllocType = DeducedType;
// Per C++0x []p5, the type being constructed may be a
// typedef of an array type.
if (!ArraySize) {
if (const ConstantArrayType *Array
= Context.getAsConstantArrayType(AllocType)) {
ArraySize = IntegerLiteral::Create(Context, Array->getSize(),
AllocType = Array->getElementType();
if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
return ExprError();
// In ARC, infer 'retaining' for the allocated
if (getLangOpts().ObjCAutoRefCount &&
AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
AllocType->isObjCLifetimeType()) {
AllocType = Context.getLifetimeQualifiedType(AllocType,
QualType ResultType = Context.getPointerType(AllocType);
if (ArraySize && *ArraySize &&
(*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(*ArraySize);
if (result.isInvalid()) return ExprError();
ArraySize = result.get();
// C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
// integral or enumeration type with a non-negative value."
// C++11 []p6: The expression [...] shall be of integral or unscoped
// enumeration type, or a class type for which a single non-explicit
// conversion function to integral or unscoped enumeration type exists.
// C++1y []p6: The expression [...] is implicitly converted to
// std::size_t.
llvm::Optional<uint64_t> KnownArraySize;
if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
ExprResult ConvertedSize;
if (getLangOpts().CPlusPlus14) {
assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
ConvertedSize = PerformImplicitConversion(*ArraySize, Context.getSizeType(),
if (!ConvertedSize.isInvalid() &&
// Diagnose the compatibility of this conversion.
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
<< (*ArraySize)->getType() << 0 << "'size_t'";
} else {
class SizeConvertDiagnoser : public ICEConvertDiagnoser {
Expr *ArraySize;
SizeConvertDiagnoser(Expr *ArraySize)
: ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
ArraySize(ArraySize) {}
SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
QualType T) override {
return S.Diag(Loc, diag::err_array_size_not_integral)
<< S.getLangOpts().CPlusPlus11 << T;
SemaDiagnosticBuilder diagnoseIncomplete(
Sema &S, SourceLocation Loc, QualType T) override {
return S.Diag(Loc, diag::err_array_size_incomplete_type)
<< T << ArraySize->getSourceRange();
SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
SemaDiagnosticBuilder noteExplicitConv(
Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
<< ConvTy->isEnumeralType() << ConvTy;
SemaDiagnosticBuilder diagnoseAmbiguous(
Sema &S, SourceLocation Loc, QualType T) override {
return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
SemaDiagnosticBuilder noteAmbiguous(
Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
<< ConvTy->isEnumeralType() << ConvTy;
SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
QualType T,
QualType ConvTy) override {
return S.Diag(Loc,
? diag::warn_cxx98_compat_array_size_conversion
: diag::ext_array_size_conversion)
<< T << ConvTy->isEnumeralType() << ConvTy;
} SizeDiagnoser(*ArraySize);
ConvertedSize = PerformContextualImplicitConversion(StartLoc, *ArraySize,
if (ConvertedSize.isInvalid())
return ExprError();
ArraySize = ConvertedSize.get();
QualType SizeType = (*ArraySize)->getType();
if (!SizeType->isIntegralOrUnscopedEnumerationType())
return ExprError();
// C++98 []p7:
// The expression in a direct-new-declarator shall have integral type
// with a non-negative value.
// Let's see if this is a constant < 0. If so, we reject it out of hand,
// per CWG1464. Otherwise, if it's not a constant, we must have an
// unparenthesized array type.
// We've already performed any required implicit conversion to integer or
// unscoped enumeration type.
// FIXME: Per CWG1464, we are required to check the value prior to
// converting to size_t. This will never find a negative array size in
// C++14 onwards, because Value is always unsigned here!
if (Optional<llvm::APSInt> Value =
(*ArraySize)->getIntegerConstantExpr(Context)) {
if (Value->isSigned() && Value->isNegative()) {
return ExprError(Diag((*ArraySize)->getBeginLoc(),
<< (*ArraySize)->getSourceRange());
if (!AllocType->isDependentType()) {
unsigned ActiveSizeBits =
ConstantArrayType::getNumAddressingBits(Context, AllocType, *Value);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
return ExprError(
Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
<< toString(*Value, 10) << (*ArraySize)->getSourceRange());
KnownArraySize = Value->getZExtValue();
} else if (TypeIdParens.isValid()) {
// Can't have dynamic array size when the type-id is in parentheses.
Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
<< (*ArraySize)->getSourceRange()
<< FixItHint::CreateRemoval(TypeIdParens.getBegin())
<< FixItHint::CreateRemoval(TypeIdParens.getEnd());
TypeIdParens = SourceRange();
// Note that we do *not* convert the argument in any way. It can
// be signed, larger than size_t, whatever.
FunctionDecl *OperatorNew = nullptr;
FunctionDecl *OperatorDelete = nullptr;
unsigned Alignment =
AllocType->isDependentType() ? 0 : Context.getTypeAlign(AllocType);
unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
bool PassAlignment = getLangOpts().AlignedAllocation &&
Alignment > NewAlignment;
AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
AllocType, ArraySize.hasValue(), PassAlignment, PlacementArgs,
OperatorNew, OperatorDelete))
return ExprError();
// If this is an array allocation, compute whether the usual array
// deallocation function for the type has a size_t parameter.
bool UsualArrayDeleteWantsSize = false;
if (ArraySize && !AllocType->isDependentType())
UsualArrayDeleteWantsSize =
doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
SmallVector<Expr *, 8> AllPlaceArgs;
if (OperatorNew) {
auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
: VariadicDoesNotApply;
// We've already converted the placement args, just fill in any default
// arguments. Skip the first parameter because we don't have a corresponding
// argument. Skip the second parameter too if we're passing in the
// alignment; we've already filled it in.
unsigned NumImplicitArgs = PassAlignment ? 2 : 1;
if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
NumImplicitArgs, PlacementArgs, AllPlaceArgs,
return ExprError();
if (!AllPlaceArgs.empty())
PlacementArgs = AllPlaceArgs;
// We would like to perform some checking on the given `operator new` call,
// but the PlacementArgs does not contain the implicit arguments,
// namely allocation size and maybe allocation alignment,
// so we need to conjure them.
QualType SizeTy = Context.getSizeType();
unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
llvm::APInt SingleEltSize(
SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
// How many bytes do we want to allocate here?
llvm::Optional<llvm::APInt> AllocationSize;
if (!ArraySize.hasValue() && !AllocType->isDependentType()) {
// For non-array operator new, we only want to allocate one element.
AllocationSize = SingleEltSize;
} else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) {
// For array operator new, only deal with static array size case.
bool Overflow;
AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
.umul_ov(SingleEltSize, Overflow);
!Overflow &&
"Expected that all the overflows would have been handled already.");
IntegerLiteral AllocationSizeLiteral(
Context, AllocationSize.getValueOr(llvm::APInt::getZero(SizeTyWidth)),
SizeTy, SourceLocation());
// Otherwise, if we failed to constant-fold the allocation size, we'll
// just give up and pass-in something opaque, that isn't a null pointer.
OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_PRValue,
OK_Ordinary, /*SourceExpr=*/nullptr);
// Let's synthesize the alignment argument in case we will need it.
// Since we *really* want to allocate these on stack, this is slightly ugly
// because there might not be a `std::align_val_t` type.
EnumDecl *StdAlignValT = getStdAlignValT();
QualType AlignValT =
StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
IntegerLiteral AlignmentLiteral(
Alignment / Context.getCharWidth()),
SizeTy, SourceLocation());
ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
CK_IntegralCast, &AlignmentLiteral,
VK_PRValue, FPOptionsOverride());
// Adjust placement args by prepending conjured size and alignment exprs.
llvm::SmallVector<Expr *, 8> CallArgs;
CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
? static_cast<Expr *>(&AllocationSizeLiteral)
: &OpaqueAllocationSize);
if (PassAlignment)
CallArgs.insert(CallArgs.end(), PlacementArgs.begin(), PlacementArgs.end());
DiagnoseSentinelCalls(OperatorNew, PlacementLParen, CallArgs);
checkCall(OperatorNew, Proto, /*ThisArg=*/nullptr, CallArgs,
/*IsMemberFunction=*/false, StartLoc, Range, CallType);
// Warn if the type is over-aligned and is being allocated by (unaligned)
// global operator new.
if (PlacementArgs.empty() && !PassAlignment &&
(OperatorNew->isImplicit() ||
(OperatorNew->getBeginLoc().isValid() &&
getSourceManager().isInSystemHeader(OperatorNew->getBeginLoc())))) {
if (Alignment > NewAlignment)
Diag(StartLoc, diag::warn_overaligned_type)
<< AllocType
<< unsigned(Alignment / Context.getCharWidth())
<< unsigned(NewAlignment / Context.getCharWidth());
// Array 'new' can't have any initializers except empty parentheses.
// Initializer lists are also allowed, in C++11. Rely on the parser for the
// dialect distinction.
if (ArraySize && !isLegalArrayNewInitializer(initStyle, Initializer)) {
SourceRange InitRange(Inits[0]->getBeginLoc(),
Inits[NumInits - 1]->getEndLoc());
Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
return ExprError();
// If we can perform the initialization, and we've not already done so,
// do it now.
if (!AllocType->isDependentType() &&
llvm::makeArrayRef(Inits, NumInits))) {
// The type we initialize is the complete type, including the array bound.
QualType InitType;
if (KnownArraySize)
InitType = Context.getConstantArrayType(
*ArraySize, ArrayType::Normal, 0);
else if (ArraySize)
InitType =
Context.getIncompleteArrayType(AllocType, ArrayType::Normal, 0);
InitType = AllocType;
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, InitType);
InitializationSequence InitSeq(*this, Entity, Kind,
MultiExprArg(Inits, NumInits));
ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
MultiExprArg(Inits, NumInits));
if (FullInit.isInvalid())
return ExprError();
// FullInit is our initializer; strip off CXXBindTemporaryExprs, because
// we don't want the initialized object to be destructed.
// FIXME: We should not create these in the first place.
if (CXXBindTemporaryExpr *Binder =
FullInit = Binder->getSubExpr();
Initializer = FullInit.get();
// FIXME: If we have a KnownArraySize, check that the array bound of the
// initializer is no greater than that constant value.
if (ArraySize && !*ArraySize) {
auto *CAT = Context.getAsConstantArrayType(Initializer->getType());
if (CAT) {
// FIXME: Track that the array size was inferred rather than explicitly
// specified.
ArraySize = IntegerLiteral::Create(
Context, CAT->getSize(), Context.getSizeType(), TypeRange.getEnd());
} else {
Diag(TypeRange.getEnd(), diag::err_new_array_size_unknown_from_init)
<< Initializer->getSourceRange();
// Mark the new and delete operators as referenced.
if (OperatorNew) {
if (DiagnoseUseOfDecl(OperatorNew, StartLoc))
return ExprError();
MarkFunctionReferenced(StartLoc, OperatorNew);
if (OperatorDelete) {
if (DiagnoseUseOfDecl(OperatorDelete, StartLoc))
return ExprError();
MarkFunctionReferenced(StartLoc, OperatorDelete);
return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
PassAlignment, UsualArrayDeleteWantsSize,
PlacementArgs, TypeIdParens, ArraySize, initStyle,
Initializer, ResultType, AllocTypeInfo, Range,
/// Checks that a type is suitable as the allocated type
/// in a new-expression.
///
/// Returns true (after emitting a diagnostic) if the type is unusable:
/// function or reference types, incomplete/sizeless or abstract types,
/// variably-modified types, non-default address spaces, and — under ARC —
/// arrays of lifetime-qualified element types without ownership.
/// Returns false if the type is acceptable.
///
/// NOTE(review): several conditions below are visibly truncated in this
/// copy (the RequireCompleteSizedType/RequireNonAbstractType calls and the
/// address-space / ARC conditions are cut mid-expression); text preserved
/// byte-for-byte.
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R) {
// C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
// abstract class type or array thereof.
if (AllocType->isFunctionType())
return Diag(Loc, diag::err_bad_new_type)
<< AllocType << 0 << R;
else if (AllocType->isReferenceType())
return Diag(Loc, diag::err_bad_new_type)
<< AllocType << 1 << R;
else if (!AllocType->isDependentType() &&
Loc, AllocType, diag::err_new_incomplete_or_sizeless_type, R))
return true;
else if (RequireNonAbstractType(Loc, AllocType,
return true;
else if (AllocType->isVariablyModifiedType())
return Diag(Loc, diag::err_variably_modified_new_type)
<< AllocType;
else if (AllocType.getAddressSpace() != LangAS::Default &&
return Diag(Loc, diag::err_address_space_qualified_new)
<< AllocType.getUnqualifiedType()
<< AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
else if (getLangOpts().ObjCAutoRefCount) {
// Under ARC, an array new of an ObjC lifetime type must state ownership.
if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
QualType BaseAllocType = Context.getBaseElementType(AT);
if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
return Diag(Loc, diag::err_arc_new_array_without_ownership)
<< BaseAllocType;
// All checks passed: the type may be allocated.
return false;
/// Perform overload resolution over the allocation functions found by
/// lookup result \p R for the argument list \p Args, storing the winner in
/// \p Operator. Returns true on error (diagnosed when \p Diagnose is set).
///
/// On OR_No_Viable_Function with an alignment argument present, this retries
/// without the alignment argument per C++17 [expr.new]p13 (clearing
/// \p PassAlignment); under -fms-compatibility it also retries a failed
/// operator new[] lookup at global scope.
///
/// NOTE(review): this copy is missing lines (e.g. the candidate-set kind at
/// the constructor, the AddTemplateOverloadCandidate/AddOverloadCandidate
/// tails, the Diag calls in the ambiguous/deleted cases, and closing
/// braces); text preserved byte-for-byte.
static bool resolveAllocationOverload(
Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
bool &PassAlignment, FunctionDecl *&Operator,
OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
OverloadCandidateSet Candidates(R.getNameLoc(),
for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
Alloc != AllocEnd; ++Alloc) {
// Even member operator new/delete are implicitly treated as
// static, so don't use AddMemberCandidate.
NamedDecl *D = (*Alloc)->getUnderlyingDecl();
if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
S.AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
/*ExplicitTemplateArgs=*/nullptr, Args,
FunctionDecl *Fn = cast<FunctionDecl>(D);
S.AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
// Do the resolution.
OverloadCandidateSet::iterator Best;
switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
case OR_Success: {
// Got one!
FunctionDecl *FnDecl = Best->Function;
// Check access to the chosen allocation function before committing.
if (S.CheckAllocationAccess(R.getNameLoc(), Range, R.getNamingClass(),
Best->FoundDecl) == Sema::AR_inaccessible)
return true;
Operator = FnDecl;
return false;
case OR_No_Viable_Function:
// C++17 []p13:
// If no matching function is found and the allocated object type has
// new-extended alignment, the alignment argument is removed from the
// argument list, and overload resolution is performed again.
if (PassAlignment) {
PassAlignment = false;
AlignArg = Args[1];
Args.erase(Args.begin() + 1);
return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
Operator, &Candidates, AlignArg,
// MSVC will fall back on trying to find a matching global operator new
// if operator new[] cannot be found. Also, MSVC will leak by not
// generating a call to operator delete or operator delete[], but we
// will not replicate that bug.
// FIXME: Find out how this interacts with the std::align_val_t fallback
// once MSVC implements it.
if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
S.Context.getLangOpts().MSVCCompat) {
S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
// FIXME: This will give bad diagnostics pointing at the wrong functions.
return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
Operator, /*Candidates=*/nullptr,
/*AlignArg=*/nullptr, Diagnose);
if (Diagnose) {
// If this is an allocation of the form 'new (p) X' for some object
// pointer p (or an expression that will decay to such a pointer),
// diagnose the missing inclusion of <new>.
if (!R.isClassLookup() && Args.size() == 2 &&
(Args[1]->getType()->isObjectPointerType() ||
Args[1]->getType()->isArrayType())) {
S.Diag(R.getNameLoc(), diag::err_need_header_before_placement_new)
<< R.getLookupName() << Range;
// Listing the candidates is unlikely to be useful; skip it.
return true;
// Finish checking all candidates before we note any. This checking can
// produce additional diagnostics so can't be interleaved with our
// emission of notes.
// For an aligned allocation, separately check the aligned and unaligned
// candidates with their respective argument lists.
SmallVector<OverloadCandidate*, 32> Cands;
SmallVector<OverloadCandidate*, 32> AlignedCands;
llvm::SmallVector<Expr*, 4> AlignedArgs;
if (AlignedCandidates) {
auto IsAligned = [](OverloadCandidate &C) {
return C.Function->getNumParams() > 1 &&
auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
// Rebuild the aligned argument list (size, alignment, placement args)
// so the aligned candidate notes show the arguments they were tried with.
AlignedArgs.reserve(Args.size() + 1);
AlignedArgs.append(Args.begin() + 1, Args.end());
AlignedCands = AlignedCandidates->CompleteCandidates(
S, OCD_AllCandidates, AlignedArgs, R.getNameLoc(), IsAligned);
Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
R.getNameLoc(), IsUnaligned);
} else {
Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
<< R.getLookupName() << Range;
if (AlignedCandidates)
AlignedCandidates->NoteCandidates(S, AlignedArgs, AlignedCands, "",
Candidates.NoteCandidates(S, Args, Cands, "", R.getNameLoc());
return true;
case OR_Ambiguous:
if (Diagnose) {
<< R.getLookupName() << Range),
S, OCD_AmbiguousCandidates, Args);
return true;
case OR_Deleted: {
if (Diagnose) {
<< R.getLookupName() << Range),
S, OCD_AllCandidates, Args);
return true;
llvm_unreachable("Unreachable, bad result from BestViableFunction");
bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose) {
// --- Choosing an allocation function ---
// C++ 5.3.4p8 - 14 & 18
// 1) If looking in AFS_Global scope for allocation functions, only look in
// the global scope. Else, if AFS_Class, only look in the scope of the
// allocated class. If AFS_Both, look in both.
// 2) If an array size is given, look for operator new[], else look for
// operator new.
// 3) The first argument is always size_t. Append the arguments from the
// placement form.
SmallVector<Expr*, 8> AllocArgs;
AllocArgs.reserve((PassAlignment ? 2 : 1) + PlaceArgs.size());
// We don't care about the actual value of these arguments.
// FIXME: Should the Sema create the expression and embed it in the syntax
// tree? Or should the consumer just recalculate the value?
// FIXME: Using a dummy value will interact poorly with attribute enable_if.
IntegerLiteral Size(
Context, llvm::APInt::getZero(Context.getTargetInfo().getPointerWidth(0)),
Context.getSizeType(), SourceLocation());
QualType AlignValT = Context.VoidTy;
if (PassAlignment) {
AlignValT = Context.getTypeDeclType(getStdAlignValT());
CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
if (PassAlignment)
AllocArgs.insert(AllocArgs.end(), PlaceArgs.begin(), PlaceArgs.end());
// C++ []p8:
// If the allocated type is a non-array type, the allocation
// function's name is operator new and the deallocation function's
// name is operator delete. If the allocated type is an array
// type, the allocation function's name is operator new[] and the
// deallocation function's name is operator delete[].
DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
IsArray ? OO_Array_New : OO_New);
QualType AllocElemType = Context.getBaseElementType(AllocType);
// Find the allocation function.
LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
// C++1z []p9:
// If the new-expression begins with a unary :: operator, the allocation
// function's name is looked up in the global scope. Otherwise, if the
// allocated type is a class type T or array thereof, the allocation
// function's name is looked up in the scope of T.
if (AllocElemType->isRecordType() && NewScope != AFS_Global)
LookupQualifiedName(R, AllocElemType->getAsCXXRecordDecl());
// We can see ambiguity here if the allocation function is found in
// multiple base classes.
if (R.isAmbiguous())
return true;
// If this lookup fails to find the name, or if the allocated type is not
// a class type, the allocation function's name is looked up in the
// global scope.
if (R.empty()) {
if (NewScope == AFS_Class)
return true;
LookupQualifiedName(R, Context.getTranslationUnitDecl());
if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
if (PlaceArgs.empty()) {
Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
} else {
Diag(StartLoc, diag::err_openclcxx_placement_new);
return true;
assert(!R.empty() && "implicitly declared allocation functions not found");
assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
// We do our own custom access checks below.
if (resolveAllocationOverload(*this, R, Range, AllocArgs, PassAlignment,
OperatorNew, /*Candidates=*/nullptr,
/*AlignArg=*/nullptr, Diagnose))
return true;
// We don't need an operator delete if we're running under -fno-exceptions.
if (!getLangOpts().Exceptions) {
OperatorDelete = nullptr;
return false;
// Note, the name of OperatorNew might have been changed from array to
// non-array by resolveAllocationOverload.
DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
? OO_Array_Delete
: OO_Delete);
// C++ []p19:
// If the new-expression begins with a unary :: operator, the
// deallocation function's name is looked up in the global
// scope. Otherwise, if the allocated type is a class type T or an
// array thereof, the deallocation function's name is looked up in
// the scope of T. If this lookup fails to find the name, or if
// the allocated type is not a class type or array thereof, the
// deallocation function's name is looked up in the global scope.
LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
if (AllocElemType->isRecordType() && DeleteScope != AFS_Global) {
auto *RD =
LookupQualifiedName(FoundDelete, RD);
if (FoundDelete.isAmbiguous())
return true; // FIXME: clean up expressions?
// Filter out any destroying operator deletes. We can't possibly call such a
// function in this context, because we're handling the case where the object
// was not successfully constructed.
// FIXME: This is not covered by the language rules yet.
LookupResult::Filter Filter = FoundDelete.makeFilter();
while (Filter.hasNext()) {
auto *FD = dyn_cast<FunctionDecl>(>getUnderlyingDecl());
if (FD && FD->isDestroyingOperatorDelete())
bool FoundGlobalDelete = FoundDelete.empty();
if (FoundDelete.empty()) {
if (DeleteScope == AFS_Class)
return true;
LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
// Whether we're looking for a placement operator delete is dictated