//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Expr constant evaluator.
//
// Constant expression evaluation produces four main results:
//
// * A success/failure flag indicating whether constant folding was successful.
// This is the 'bool' return value used by most of the code in this file. A
// 'false' return value indicates that constant folding has failed, and any
// appropriate diagnostic has already been produced.
//
// * An evaluated result, valid only if constant folding has not failed.
//
// * A flag indicating if evaluation encountered (unevaluated) side-effects.
// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
// where it is possible to determine the evaluated result regardless.
//
// * A set of notes indicating why the evaluation was not a constant expression
// (under the C++11 / C++1y rules only, at the moment), or, if folding failed
// too, why the expression could not be folded.
//
// If we are checking for a potential constant expression, failure to constant
// fold a potential constant sub-expression will be indicated by a 'false'
// return value (the expression could not be folded) and no diagnostic (the
// expression is not necessarily non-constant).
//
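//
// A caller-side sketch (hypothetical usage, not code from this file) of how
// these results are surfaced through the public Expr evaluation API:
//
//   Expr::EvalResult Result;
//   SmallVector<PartialDiagnosticAt, 8> Notes;
//   Result.Diag = &Notes;                // collect "why not constant" notes
//   if (E->EvaluateAsRValue(Result, Ctx)) {
//     // Folding succeeded: Result.Val holds the value, and
//     // Result.HasSideEffects reports unevaluated side-effects.
//   } else {
//     // Folding failed: Notes, when requested, explains why.
//   }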
//===----------------------------------------------------------------------===//
#include "Interp/Context.h"
#include "Interp/Frame.h"
#include "Interp/State.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
#define DEBUG_TYPE "exprconstant"
using namespace clang;
using llvm::APFixedPoint;
using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
using llvm::FixedPointSemantics;
using llvm::Optional;
namespace {
struct LValue;
class CallStackFrame;
class EvalInfo;
using SourceLocExprScopeGuard =
CurrentSourceLocExprScope::SourceLocExprScopeGuard;
static QualType getType(APValue::LValueBase B) {
return B.getType();
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// field declaration.
static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// base class declaration.
static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
}
/// Determine whether this LValue path entry for a base class names a virtual
/// base class.
static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
return E.getAsBaseOrMember().getInt();
}
/// Given an expression, determine the type used to store the result of
/// evaluating that expression.
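/// For example, for the lvalue expression 'x', where 'x' is declared as
/// 'int x;', the storage type is 'int &'; for a prvalue of type 'int' it is
/// simply 'int'.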
static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
if (E->isPRValue())
return E->getType();
return Ctx.getLValueReferenceType(E->getType());
}
/// Given a CallExpr, try to get the alloc_size attribute. May return null.
static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
if (const FunctionDecl *DirectCallee = CE->getDirectCallee())
return DirectCallee->getAttr<AllocSizeAttr>();
if (const Decl *IndirectCallee = CE->getCalleeDecl())
return IndirectCallee->getAttr<AllocSizeAttr>();
return nullptr;
}
/// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
/// This will look through a single cast.
///
/// Returns null if we couldn't unwrap a function with alloc_size.
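///
/// For example (an illustrative snippet; 'my_malloc' stands for any function
/// declared with __attribute__((alloc_size(1))) and is not defined here):
///
///   void *p = (void *)my_malloc(42);
///
/// Here the cast over the initializer is looked through and the CallExpr for
/// 'my_malloc(42)' is returned.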
static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
if (!E->getType()->isPointerType())
return nullptr;
E = E->IgnoreParens();
// If we're doing a variable assignment from e.g. malloc(N), there will
// probably be a cast of some kind. In exotic cases, we might also see a
// top-level ExprWithCleanups. Ignore them either way.
if (const auto *FE = dyn_cast<FullExpr>(E))
E = FE->getSubExpr()->IgnoreParens();
if (const auto *Cast = dyn_cast<CastExpr>(E))
E = Cast->getSubExpr()->IgnoreParens();
if (const auto *CE = dyn_cast<CallExpr>(E))
return getAllocSizeAttr(CE) ? CE : nullptr;
return nullptr;
}
/// Determines whether or not the given Base contains a call to a function
/// with the alloc_size attribute.
static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
const auto *E = Base.dyn_cast<const Expr *>();
return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
}
/// Determines whether the given kind of constant expression is only ever
/// used for name mangling. If so, it's permitted to reference things that we
/// can't generate code for (in particular, dllimported functions).
static bool isForManglingOnly(ConstantExprKind Kind) {
switch (Kind) {
case ConstantExprKind::Normal:
case ConstantExprKind::ClassTemplateArgument:
case ConstantExprKind::ImmediateInvocation:
// Note that non-type template arguments of class type are emitted as
// template parameter objects.
return false;
case ConstantExprKind::NonClassTemplateArgument:
return true;
}
llvm_unreachable("unknown ConstantExprKind");
}
static bool isTemplateArgument(ConstantExprKind Kind) {
switch (Kind) {
case ConstantExprKind::Normal:
case ConstantExprKind::ImmediateInvocation:
return false;
case ConstantExprKind::ClassTemplateArgument:
case ConstantExprKind::NonClassTemplateArgument:
return true;
}
llvm_unreachable("unknown ConstantExprKind");
}
/// The bound to claim for an array of unknown bound.
/// The value in MostDerivedArraySize is undefined in this case. So, set it
/// to an arbitrary value that's likely to loudly break things if it's used.
static const uint64_t AssumedSizeForUnsizedArray =
std::numeric_limits<uint64_t>::max() / 2;
/// Find the path length and type of the most-derived subobject in the given
/// path, and find the size of the containing array, if any. Also reports
/// whether the first path entry designates an array of unknown bound.
static unsigned
findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
ArrayRef<APValue::LValuePathEntry> Path,
uint64_t &ArraySize, QualType &Type, bool &IsArray,
bool &FirstEntryIsUnsizedArray) {
// This only accepts LValueBases from APValues, and APValues don't support
// arrays that lack size info.
assert(!isBaseAnAllocSizeCall(Base) &&
"Unsized arrays shouldn't appear here");
unsigned MostDerivedLength = 0;
Type = getType(Base);
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (Type->isArrayType()) {
const ArrayType *AT = Ctx.getAsArrayType(Type);
Type = AT->getElementType();
MostDerivedLength = I + 1;
IsArray = true;
if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
ArraySize = CAT->getSize().getZExtValue();
} else {
assert(I == 0 && "unexpected unsized array designator");
FirstEntryIsUnsizedArray = true;
ArraySize = AssumedSizeForUnsizedArray;
}
} else if (Type->isAnyComplexType()) {
const ComplexType *CT = Type->castAs<ComplexType>();
Type = CT->getElementType();
ArraySize = 2;
MostDerivedLength = I + 1;
IsArray = true;
} else if (const FieldDecl *FD = getAsField(Path[I])) {
Type = FD->getType();
ArraySize = 0;
MostDerivedLength = I + 1;
IsArray = false;
} else {
// Path[I] describes a base class.
ArraySize = 0;
IsArray = false;
}
}
return MostDerivedLength;
}
/// A path from a glvalue to a subobject of that glvalue.
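/// For example, given 'struct S { int b[4]; } a;', the designator for the
/// glvalue 'a.b[2]' holds the path {field 'b', array index 2}, with
/// MostDerivedType 'int', MostDerivedIsArrayElement true,
/// MostDerivedArraySize 4, and MostDerivedPathLength 2.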
struct SubobjectDesignator {
/// True if the subobject was named in a manner not supported by C++11. Such
/// lvalues can still be folded, but they are not core constant expressions
/// and we cannot perform lvalue-to-rvalue conversions on them.
unsigned Invalid : 1;
/// Is this a pointer one past the end of an object?
unsigned IsOnePastTheEnd : 1;
/// Indicator of whether the first entry is an unsized array.
unsigned FirstEntryIsAnUnsizedArray : 1;
/// Indicator of whether the most-derived object is an array element.
unsigned MostDerivedIsArrayElement : 1;
/// The length of the path to the most-derived object of which this is a
/// subobject.
unsigned MostDerivedPathLength : 28;
/// The size of the array of which the most-derived object is an element.
/// This will always be 0 if the most-derived object is not an array
/// element. 0 is not an indicator of whether or not the most-derived object
/// is an array, however, because 0-length arrays are allowed.
///
/// If the current array is an unsized array, the value of this is
/// undefined.
uint64_t MostDerivedArraySize;
/// The type of the most derived object referred to by this address.
QualType MostDerivedType;
typedef APValue::LValuePathEntry PathEntry;
/// The entries on the path from the glvalue to the designated subobject.
SmallVector<PathEntry, 8> Entries;
SubobjectDesignator() : Invalid(true) {}
explicit SubobjectDesignator(QualType T)
: Invalid(false), IsOnePastTheEnd(false),
FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
MostDerivedPathLength(0), MostDerivedArraySize(0),
MostDerivedType(T) {}
SubobjectDesignator(ASTContext &Ctx, const APValue &V)
: Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
MostDerivedPathLength(0), MostDerivedArraySize(0) {
assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
if (!Invalid) {
IsOnePastTheEnd = V.isLValueOnePastTheEnd();
ArrayRef<PathEntry> VEntries = V.getLValuePath();
Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
if (V.getLValueBase()) {
bool IsArray = false;
bool FirstIsUnsizedArray = false;
MostDerivedPathLength = findMostDerivedSubobject(
Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
MostDerivedType, IsArray, FirstIsUnsizedArray);
MostDerivedIsArrayElement = IsArray;
FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
}
}
}
void truncate(ASTContext &Ctx, APValue::LValueBase Base,
unsigned NewLength) {
if (Invalid)
return;
assert(Base && "cannot truncate path for null pointer");
assert(NewLength <= Entries.size() && "not a truncation");
if (NewLength == Entries.size())
return;
Entries.resize(NewLength);
bool IsArray = false;
bool FirstIsUnsizedArray = false;
MostDerivedPathLength = findMostDerivedSubobject(
Ctx, Base, Entries, MostDerivedArraySize, MostDerivedType, IsArray,
FirstIsUnsizedArray);
MostDerivedIsArrayElement = IsArray;
FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
}
void setInvalid() {
Invalid = true;
Entries.clear();
}
/// Determine whether the most derived subobject is an array without a
/// known bound.
bool isMostDerivedAnUnsizedArray() const {
assert(!Invalid && "Calling this makes no sense on invalid designators");
return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
}
/// Determine what the most derived array's size is. Results in an assertion
/// failure if the most derived array lacks a size.
uint64_t getMostDerivedArraySize() const {
assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
return MostDerivedArraySize;
}
/// Determine whether this is a one-past-the-end pointer.
bool isOnePastTheEnd() const {
assert(!Invalid);
if (IsOnePastTheEnd)
return true;
if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
MostDerivedArraySize)
return true;
return false;
}
/// Get the range of valid index adjustments in the form
/// {maximum value that can be subtracted from this pointer,
/// maximum value that can be added to this pointer}
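/// For example, for a pointer designating arr[1] in 'int arr[3]', this
/// returns {1, 2}: the pointer can move back by at most one element and
/// forward by at most two (onto the one-past-the-end position).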
std::pair<uint64_t, uint64_t> validIndexAdjustments() {
if (Invalid || isMostDerivedAnUnsizedArray())
return {0, 0};
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
: (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
return {ArrayIndex, ArraySize - ArrayIndex};
}
/// Check that this refers to a valid subobject.
bool isValidSubobject() const {
if (Invalid)
return false;
return !isOnePastTheEnd();
}
/// Check that this refers to a valid subobject, and if not, produce a
/// relevant diagnostic and set the designator as invalid.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
/// Get the type of the designated object.
QualType getType(ASTContext &Ctx) const {
assert(!Invalid && "invalid designator has no subobject type");
return MostDerivedPathLength == Entries.size()
? MostDerivedType
: Ctx.getRecordType(getAsBaseClass(Entries.back()));
}
/// Update this designator to refer to the first element within this array.
void addArrayUnchecked(const ConstantArrayType *CAT) {
Entries.push_back(PathEntry::ArrayIndex(0));
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
MostDerivedIsArrayElement = true;
MostDerivedArraySize = CAT->getSize().getZExtValue();
MostDerivedPathLength = Entries.size();
}
/// Update this designator to refer to the first element within the array of
/// elements of type T. This is an array of unknown size.
void addUnsizedArrayUnchecked(QualType ElemTy) {
Entries.push_back(PathEntry::ArrayIndex(0));
MostDerivedType = ElemTy;
MostDerivedIsArrayElement = true;
// The value in MostDerivedArraySize is undefined in this case. So, set it
// to an arbitrary value that's likely to loudly break things if it's
// used.
MostDerivedArraySize = AssumedSizeForUnsizedArray;
MostDerivedPathLength = Entries.size();
}
/// Update this designator to refer to the given base or member of this
/// object.
void addDeclUnchecked(const Decl *D, bool Virtual = false) {
Entries.push_back(APValue::BaseOrMemberType(D, Virtual));
// If this isn't a base class, it's a new most-derived object.
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
MostDerivedType = FD->getType();
MostDerivedIsArrayElement = false;
MostDerivedArraySize = 0;
MostDerivedPathLength = Entries.size();
}
}
/// Update this designator to refer to the given complex component.
void addComplexUnchecked(QualType EltTy, bool Imag) {
Entries.push_back(PathEntry::ArrayIndex(Imag));
// This is technically a most-derived object, though in practice this
// is unlikely to matter.
MostDerivedType = EltTy;
MostDerivedIsArrayElement = true;
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &N);
/// Add N to the address of this subobject.
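/// For example, for a pointer designating arr[1] in 'int arr[3]', adjusting
/// by N=2 produces the one-past-the-end value arr + 3, while N=3 is
/// diagnosed as out of bounds and invalidates the designator.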
void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
if (Invalid || !N) return;
uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
if (isMostDerivedAnUnsizedArray()) {
diagnoseUnsizedArrayPointerArithmetic(Info, E);
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
// FIXME: Should we reject if this overflows, at least?
Entries.back() = PathEntry::ArrayIndex(
Entries.back().getAsArrayIndex() + TruncatedN);
return;
}
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
: (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
// Calculate the actual index in a wide enough type, so we can include
// it in the note.
N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
(llvm::APInt&)N += ArrayIndex;
assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
diagnosePointerArithmetic(Info, E, N);
setInvalid();
return;
}
ArrayIndex += TruncatedN;
assert(ArrayIndex <= ArraySize &&
"bounds check succeeded for out-of-bounds index");
if (IsArray)
Entries.back() = PathEntry::ArrayIndex(ArrayIndex);
else
IsOnePastTheEnd = (ArrayIndex != 0);
}
};
/// A scope at the end of which an object may need to be destroyed.
enum class ScopeKind {
Block,
FullExpression,
Call
};
/// A reference to a particular call and its arguments.
struct CallRef {
CallRef() : OrigCallee(), CallIndex(0), Version() {}
CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
: OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
explicit operator bool() const { return OrigCallee; }
/// Get the parameter that the caller initialized, corresponding to the
/// given parameter in the callee.
const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
return OrigCallee ? OrigCallee->getParamDecl(PVD->getFunctionScopeIndex())
: PVD;
}
/// The callee at the point where the arguments were evaluated. This might
/// be different from the actual callee (a different redeclaration, or a
/// virtual override), but this function's parameters are the ones that
/// appear in the parameter map.
const FunctionDecl *OrigCallee;
/// The call index of the frame that holds the argument values.
unsigned CallIndex;
/// The version of the parameters corresponding to this call.
unsigned Version;
};
/// A stack frame in the constexpr call stack.
class CallStackFrame : public interp::Frame {
public:
EvalInfo &Info;
/// Parent - The caller of this stack frame.
CallStackFrame *Caller;
/// Callee - The function which was called.
const FunctionDecl *Callee;
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
/// Information on how to find the arguments to this call. Our arguments
/// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
/// key and this value as the version.
CallRef Arguments;
/// Source location information about the default argument or default
/// initializer expression we're evaluating, if any.
CurrentSourceLocExprScope CurSourceLocExprScope;
// Note that we intentionally use std::map here so that references to
// values are stable.
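// For example, the temporary materialized for a single
// MaterializeTemporaryExpr in two different iterations of a loop uses the
// same Key but a different version (see pushTempVersion), so the two
// entries do not collide.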
typedef std::pair<const void *, unsigned> MapKeyTy;
typedef std::map<MapKeyTy, APValue> MapTy;
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
/// CallLoc - The location of the call expression for this call.
SourceLocation CallLoc;
/// Index - The call index of this call.
unsigned Index;
/// The stack of integers for tracking version numbers for temporaries.
SmallVector<unsigned, 2> TempVersionStack = {1};
unsigned CurTempVersion = TempVersionStack.back();
unsigned getTempVersion() const { return TempVersionStack.back(); }
void pushTempVersion() {
TempVersionStack.push_back(++CurTempVersion);
}
void popTempVersion() {
TempVersionStack.pop_back();
}
CallRef createCall(const FunctionDecl *Callee) {
return {Callee, Index, ++CurTempVersion};
}
// FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
// on the overall stack usage of deeply-recursing constexpr evaluations.
// (We should cache this map rather than recomputing it repeatedly.)
// But let's try this and see how it goes; we can look into caching the map
// as a later change.
/// LambdaCaptureFields - Mapping from captured variables/this to
/// corresponding data members in the closure class.
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField;
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
CallRef Arguments);
~CallStackFrame();
// Return the temporary for Key whose version number is Version.
APValue *getTemporary(const void *Key, unsigned Version) {
MapKeyTy KV(Key, Version);
auto LB = Temporaries.lower_bound(KV);
if (LB != Temporaries.end() && LB->first == KV)
return &LB->second;
// Pair (Key,Version) wasn't found in the map. Check that no elements
// in the map have 'Key' as their key.
assert((LB == Temporaries.end() || LB->first.first != Key) &&
(LB == Temporaries.begin() || std::prev(LB)->first.first != Key) &&
"Element with key 'Key' found in map");
return nullptr;
}
// Return the current temporary for Key in the map.
APValue *getCurrentTemporary(const void *Key) {
auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
return &std::prev(UB)->second;
return nullptr;
}
// Return the version number of the current temporary for Key.
unsigned getCurrentTemporaryVersion(const void *Key) const {
auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
return std::prev(UB)->first.second;
return 0;
}
/// Allocate storage for an object of type T in this stack frame.
/// Populates LV with a handle to the created object. Key identifies
/// the temporary within the stack frame, and must not be reused without
/// bumping the temporary version number.
template<typename KeyT>
APValue &createTemporary(const KeyT *Key, QualType T,
ScopeKind Scope, LValue &LV);
/// Allocate storage for a parameter of a function call made in this frame.
APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
void describe(llvm::raw_ostream &OS) override;
Frame *getCaller() const override { return Caller; }
SourceLocation getCallLocation() const override { return CallLoc; }
const FunctionDecl *getCallee() const override { return Callee; }
bool isStdFunction() const {
for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
if (DC->isStdNamespace())
return true;
return false;
}
private:
APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
ScopeKind Scope);
};
/// Temporarily override 'this'.
class ThisOverrideRAII {
public:
ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
: Frame(Frame), OldThis(Frame.This) {
if (Enable)
Frame.This = NewThis;
}
~ThisOverrideRAII() {
Frame.This = OldThis;
}
private:
CallStackFrame &Frame;
const LValue *OldThis;
};
}
static bool HandleDestruction(EvalInfo &Info, const Expr *E,
const LValue &This, QualType ThisType);
static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
APValue::LValueBase LVBase, APValue &Value,
QualType T);
namespace {
/// A cleanup, and the kind of scope at whose end it should be performed.
class Cleanup {
llvm::PointerIntPair<APValue*, 2, ScopeKind> Value;
APValue::LValueBase Base;
QualType T;
public:
Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
ScopeKind Scope)
: Value(Val, Scope), Base(Base), T(T) {}
/// Determine whether this cleanup should be performed at the end of the
/// given kind of scope.
bool isDestroyedAtEndOf(ScopeKind K) const {
return (int)Value.getInt() >= (int)K;
}
bool endLifetime(EvalInfo &Info, bool RunDestructors) {
if (RunDestructors) {
SourceLocation Loc;
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Loc = VD->getLocation();
else if (const Expr *E = Base.dyn_cast<const Expr*>())
Loc = E->getExprLoc();
return HandleDestruction(Info, Loc, Base, *Value.getPointer(), T);
}
*Value.getPointer() = APValue();
return true;
}
bool hasSideEffect() {
return T.isDestructedType();
}
};
/// A reference to an object whose construction we are currently evaluating.
struct ObjectUnderConstruction {
APValue::LValueBase Base;
ArrayRef<APValue::LValuePathEntry> Path;
friend bool operator==(const ObjectUnderConstruction &LHS,
const ObjectUnderConstruction &RHS) {
return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
}
friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
return llvm::hash_combine(Obj.Base, Obj.Path);
}
};
enum class ConstructionPhase {
None,
Bases,
AfterBases,
AfterFields,
Destroying,
DestroyingBases
};
}
namespace llvm {
template<> struct DenseMapInfo<ObjectUnderConstruction> {
using Base = DenseMapInfo<APValue::LValueBase>;
static ObjectUnderConstruction getEmptyKey() {
return {Base::getEmptyKey(), {}};
}
static ObjectUnderConstruction getTombstoneKey() {
return {Base::getTombstoneKey(), {}};
}
static unsigned getHashValue(const ObjectUnderConstruction &Object) {
return hash_value(Object);
}
static bool isEqual(const ObjectUnderConstruction &LHS,
const ObjectUnderConstruction &RHS) {
return LHS == RHS;
}
};
}
namespace {
/// A dynamically-allocated heap object.
struct DynAlloc {
/// The value of this heap-allocated object.
APValue Value;
/// The allocating expression; used for diagnostics. Either a CXXNewExpr
/// or a CallExpr (the latter is for direct calls to operator new inside
/// std::allocator<T>::allocate).
const Expr *AllocExpr = nullptr;
enum Kind {
New,
ArrayNew,
StdAllocator
};
/// Get the kind of the allocation. This must match between allocation
/// and deallocation.
Kind getKind() const {
if (auto *NE = dyn_cast<CXXNewExpr>(AllocExpr))
return NE->isArray() ? ArrayNew : New;
assert(isa<CallExpr>(AllocExpr));
return StdAllocator;
}
};
struct DynAllocOrder {
bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
return L.getIndex() < R.getIndex();
}
};
/// EvalInfo - This is a private struct used by the evaluator to capture
/// information about a subexpression as it is folded. It retains information
/// about the AST context, but also maintains information about the folded
/// expression.
///
/// If an expression could be evaluated, it is still possible it is not a C
/// "integer constant expression" or constant expression. If not, this struct
/// captures information about how and why not.
///
/// One bit of information passed *into* the request for constant folding
/// indicates whether the subexpression is "evaluated" or not according to C
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
class EvalInfo : public interp::State {
public:
ASTContext &Ctx;
/// EvalStatus - Contains information about the evaluation.
Expr::EvalStatus &EvalStatus;
/// CurrentCall - The top of the constexpr call stack.
CallStackFrame *CurrentCall;
/// CallStackDepth - The number of calls in the call stack right now.
unsigned CallStackDepth;
/// NextCallIndex - The next call index to assign.
unsigned NextCallIndex;
/// StepsLeft - The remaining number of evaluation steps we're permitted
/// to perform. This is essentially a limit for the number of statements
/// we will evaluate.
unsigned StepsLeft;
/// Enable the experimental new constant interpreter. If an expression is
/// not supported by the interpreter, an error is triggered.
bool EnableNewConstInterp;
/// BottomFrame - The frame in which evaluation started. This must be
/// initialized after CurrentCall and CallStackDepth.
CallStackFrame BottomFrame;
/// A stack of values whose lifetimes end at the end of some surrounding
/// evaluation frame.
llvm::SmallVector<Cleanup, 16> CleanupStack;
/// EvaluatingDecl - This is the declaration whose initializer is being
/// evaluated, if any.
APValue::LValueBase EvaluatingDecl;
enum class EvaluatingDeclKind {
None,
/// We're evaluating the construction of EvaluatingDecl.
Ctor,
/// We're evaluating the destruction of EvaluatingDecl.
Dtor,
};
EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;
/// EvaluatingDeclValue - This is the value being constructed for the
/// declaration whose initializer is being evaluated, if any.
APValue *EvaluatingDeclValue;
/// Set of objects that are currently being constructed.
llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
ObjectsUnderConstruction;
/// Current heap allocations, along with the location where each was
/// allocated. We use std::map here because we need stable addresses
/// for the stored APValues.
std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;
/// The number of heap allocations performed so far in this evaluation.
unsigned NumHeapAllocs = 0;
struct EvaluatingConstructorRAII {
EvalInfo &EI;
ObjectUnderConstruction Object;
bool DidInsert;
EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
bool HasBases)
: EI(EI), Object(Object) {
DidInsert =
EI.ObjectsUnderConstruction
.insert({Object, HasBases ? ConstructionPhase::Bases
: ConstructionPhase::AfterBases})
.second;
}
void finishedConstructingBases() {
EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
}
void finishedConstructingFields() {
EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
}
~EvaluatingConstructorRAII() {
if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
}
};
struct EvaluatingDestructorRAII {
EvalInfo &EI;
ObjectUnderConstruction Object;
bool DidInsert;
EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
: EI(EI), Object(Object) {
DidInsert = EI.ObjectsUnderConstruction
.insert({Object, ConstructionPhase::Destroying})
.second;
}
void startedDestroyingBases() {
EI.ObjectsUnderConstruction[Object] =
ConstructionPhase::DestroyingBases;
}
~EvaluatingDestructorRAII() {
if (DidInsert)
EI.ObjectsUnderConstruction.erase(Object);
}
};
ConstructionPhase
isEvaluatingCtorDtor(APValue::LValueBase Base,
ArrayRef<APValue::LValuePathEntry> Path) {
return ObjectsUnderConstruction.lookup({Base, Path});
}
/// If we're currently speculatively evaluating, the outermost call stack
/// depth at which we can mutate state, otherwise 0.
unsigned SpeculativeEvaluationDepth = 0;
/// The current array initialization index, if we're performing array
/// initialization.
uint64_t ArrayInitIndex = -1;
/// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
/// notes attached to it will also be stored, otherwise they will not be.
bool HasActiveDiagnostic;
/// Have we emitted a diagnostic explaining why we couldn't constant
/// fold (not just why it's not strictly a constant expression)?
bool HasFoldFailureDiagnostic;
/// Whether or not we're in a context where the front end requires a
/// constant value.
bool InConstantContext;
/// Whether we're checking that an expression is a potential constant
/// expression. If so, do not fail on constructs that could become constant
/// later on (such as a use of an undefined global).
bool CheckingPotentialConstantExpression = false;
/// Whether we're checking for an expression that has undefined behavior.
/// If so, we will produce warnings if we encounter an operation that is
/// always undefined.
///
/// Note that we still need to evaluate the expression normally when this
/// is set; this is used when evaluating ICEs in C.
bool CheckingForUndefinedBehavior = false;
enum EvaluationMode {
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression.
EM_ConstantExpression,
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression. Some expressions can be retried in the
/// optimizer if we don't constant fold them here, but in an unevaluated
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
EM_ConstantExpressionUnevaluated,
/// Fold the expression to a constant. Stop if we hit a side-effect that
/// we can't model.
EM_ConstantFold,
/// Evaluate in any way we know how. Don't worry about side-effects that
/// can't be modeled.
EM_IgnoreSideEffects,
} EvalMode;
/// Are we checking whether the expression is a potential constant
/// expression?
bool checkingPotentialConstantExpression() const override {
return CheckingPotentialConstantExpression;
}
/// Are we checking an expression for overflow?
// FIXME: We should check for any kind of undefined or suspicious behavior
// in such constructs, not just overflow.
bool checkingForUndefinedBehavior() const override {
return CheckingForUndefinedBehavior;
}
EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
: Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
CallStackDepth(0), NextCallIndex(1),
StepsLeft(C.getLangOpts().ConstexprStepLimit),
EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
BottomFrame(*this, SourceLocation(), nullptr, nullptr, CallRef()),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
HasFoldFailureDiagnostic(false), InConstantContext(false),
EvalMode(Mode) {}
~EvalInfo() {
discardCleanups();
}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
EvaluatingDecl = Base;
IsEvaluatingDecl = EDK;
EvaluatingDeclValue = &Value;
}
bool CheckCallLimit(SourceLocation Loc) {
// Don't perform any constexpr calls (other than the call we're checking)
// when checking a potential constant expression.
if (checkingPotentialConstantExpression() && CallStackDepth > 1)
return false;
if (NextCallIndex == 0) {
// NextCallIndex has wrapped around.
FFDiag(Loc, diag::note_constexpr_call_limit_exceeded);
return false;
}
if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
return true;
FFDiag(Loc, diag::note_constexpr_depth_limit_exceeded)
<< getLangOpts().ConstexprCallDepth;
return false;
}
std::pair<CallStackFrame *, unsigned>
getCallFrameAndDepth(unsigned CallIndex) {
assert(CallIndex && "no call index in getCallFrameAndDepth");
// We will eventually hit BottomFrame, which has Index 1, so Frame can't
// be null in this loop.
unsigned Depth = CallStackDepth;
CallStackFrame *Frame = CurrentCall;
while (Frame->Index > CallIndex) {
Frame = Frame->Caller;
--Depth;
}
if (Frame->Index == CallIndex)
return {Frame, Depth};
return {nullptr, 0};
}
bool nextStep(const Stmt *S) {
if (!StepsLeft) {
FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
return false;
}
--StepsLeft;
return true;
}
APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);
Optional<DynAlloc*> lookupDynamicAlloc(DynamicAllocLValue DA) {
Optional<DynAlloc*> Result;
auto It = HeapAllocs.find(DA);
if (It != HeapAllocs.end())
Result = &It->second;
return Result;
}
/// Get the allocated storage for the given parameter of the given call.
APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
CallStackFrame *Frame = getCallFrameAndDepth(Call.CallIndex).first;
return Frame ? Frame->getTemporary(Call.getOrigParam(PVD), Call.Version)
: nullptr;
}
/// Information about a stack frame for std::allocator<T>::[de]allocate.
struct StdAllocatorCaller {
unsigned FrameIndex;
QualType ElemType;
explicit operator bool() const { return FrameIndex != 0; };
};
StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
Call = Call->Caller) {
const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Call->Callee);
if (!MD)
continue;
const IdentifierInfo *FnII = MD->getIdentifier();
if (!FnII || !FnII->isStr(FnName))
continue;
const auto *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
if (!CTSD)
continue;
const IdentifierInfo *ClassII = CTSD->getIdentifier();
const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
if (CTSD->isInStdNamespace() && ClassII &&
ClassII->isStr("allocator") && TAL.size() >= 1 &&
TAL[0].getKind() == TemplateArgument::Type)
return {Call->Index, TAL[0].getAsType()};
}
return {};
}
void performLifetimeExtension() {
// Disable the cleanups for lifetime-extended temporaries.
llvm::erase_if(CleanupStack, [](Cleanup &C) {
return !C.isDestroyedAtEndOf(ScopeKind::FullExpression);
});
}
/// Throw away any remaining cleanups at the end of evaluation. If any
/// cleanups would have had a side-effect, note that as an unmodeled
/// side-effect and return false. Otherwise, return true.
bool discardCleanups() {
for (Cleanup &C : CleanupStack) {
if (C.hasSideEffect() && !noteSideEffect()) {
CleanupStack.clear();
return false;
}
}
CleanupStack.clear();
return true;
}
private:
interp::Frame *getCurrentFrame() override { return CurrentCall; }
const interp::Frame *getBottomFrame() const override { return &BottomFrame; }
bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }
void setFoldFailureDiagnostic(bool Flag) override {
HasFoldFailureDiagnostic = Flag;
}
Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
ASTContext &getCtx() const override { return Ctx; }
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
// unless we require this evaluation to produce a constant expression.
//
// FIXME: We might want to show both diagnostics to the user in
// EM_ConstantFold mode.
bool hasPriorDiagnostic() override {
if (!EvalStatus.Diag->empty()) {
switch (EvalMode) {
case EM_ConstantFold:
case EM_IgnoreSideEffects:
if (!HasFoldFailureDiagnostic)
break;
// We've already failed to fold something. Keep that diagnostic.
LLVM_FALLTHROUGH;
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
setActiveDiagnostic(false);
return true;
}
}
return false;
}
unsigned getCallStackDepth() override { return CallStackDepth; }
public:
/// Should we continue evaluation after encountering a side-effect that we
/// couldn't model?
bool keepEvaluatingAfterSideEffect() {
switch (EvalMode) {
case EM_IgnoreSideEffects:
return true;
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
// By default, assume any side effect might be valid in some other
// evaluation of this expression from a different context.
return checkingPotentialConstantExpression() ||
checkingForUndefinedBehavior();
}
llvm_unreachable("Missed EvalMode case");
}
/// Note that we have had a side-effect, and determine whether we should
/// keep evaluating.
bool noteSideEffect() {
EvalStatus.HasSideEffects = true;
return keepEvaluatingAfterSideEffect();
}
/// Should we continue evaluation after encountering undefined behavior?
bool keepEvaluatingAfterUndefinedBehavior() {
switch (EvalMode) {
case EM_IgnoreSideEffects:
case EM_ConstantFold:
return true;
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
return checkingForUndefinedBehavior();
}
llvm_unreachable("Missed EvalMode case");
}
/// Note that we hit something that was technically undefined behavior, but
/// that we can evaluate past it (such as signed overflow or floating-point
/// division by zero.)
bool noteUndefinedBehavior() override {
EvalStatus.HasUndefinedBehavior = true;
return keepEvaluatingAfterUndefinedBehavior();
}
/// Should we continue evaluation as much as possible after encountering a
/// construct which can't be reduced to a value?
bool keepEvaluatingAfterFailure() const override {
if (!StepsLeft)
return false;
switch (EvalMode) {
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
return checkingPotentialConstantExpression() ||
checkingForUndefinedBehavior();
}
llvm_unreachable("Missed EvalMode case");
}
/// Notes that we failed to evaluate an expression that other expressions
/// directly depend on, and determines whether we should keep evaluating. This
/// should only be called if we actually intend to keep evaluating.
///
/// Call noteSideEffect() instead if we may be able to ignore the value that
/// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
///
/// (Foo(), 1) // use noteSideEffect
/// (Foo() || true) // use noteSideEffect
/// Foo() + 1 // use noteFailure
LLVM_NODISCARD bool noteFailure() {
// Failure when evaluating some expression often means there is some
// subexpression whose evaluation was skipped. Therefore, (because we
// don't track whether we skipped an expression when unwinding after an
// evaluation failure) every evaluation failure that bubbles up from a
// subexpression implies that a side-effect has potentially happened. We
// skip setting the HasSideEffects flag to true until we decide to
// continue evaluating after that point, which happens here.
bool KeepGoing = keepEvaluatingAfterFailure();
EvalStatus.HasSideEffects |= KeepGoing;
return KeepGoing;
}
class ArrayInitLoopIndex {
EvalInfo &Info;
uint64_t OuterIndex;
public:
ArrayInitLoopIndex(EvalInfo &Info)
: Info(Info), OuterIndex(Info.ArrayInitIndex) {
Info.ArrayInitIndex = 0;
}
~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }
operator uint64_t&() { return Info.ArrayInitIndex; }
};
};
/// Object used to treat all foldable expressions as constant expressions.
struct FoldConstant {
EvalInfo &Info;
bool Enabled;
bool HadNoPriorDiags;
EvalInfo::EvaluationMode OldMode;
explicit FoldConstant(EvalInfo &Info, bool Enabled)
: Info(Info),
Enabled(Enabled),
HadNoPriorDiags(Info.EvalStatus.Diag &&
Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects),
OldMode(Info.EvalMode) {
if (Enabled)
Info.EvalMode = EvalInfo::EM_ConstantFold;
}
void keepDiagnostics() { Enabled = false; }
~FoldConstant() {
if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects)
Info.EvalStatus.Diag->clear();
Info.EvalMode = OldMode;
}
};
/// RAII object used to set the current evaluation mode to ignore
/// side-effects.
struct IgnoreSideEffectsRAII {
EvalInfo &Info;
EvalInfo::EvaluationMode OldMode;
explicit IgnoreSideEffectsRAII(EvalInfo &Info)
: Info(Info), OldMode(Info.EvalMode) {
Info.EvalMode = EvalInfo::EM_IgnoreSideEffects;
}
~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
};
/// RAII object used to optionally suppress diagnostics and side-effects from
/// a speculative evaluation.
class SpeculativeEvaluationRAII {
EvalInfo *Info = nullptr;
Expr::EvalStatus OldStatus;
unsigned OldSpeculativeEvaluationDepth;
void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
Info = Other.Info;
OldStatus = Other.OldStatus;
OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
Other.Info = nullptr;
}
void maybeRestoreState() {
if (!Info)
return;
Info->EvalStatus = OldStatus;
Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
}
public:
SpeculativeEvaluationRAII() = default;
SpeculativeEvaluationRAII(
EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
: Info(&Info), OldStatus(Info.EvalStatus),
OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
Info.EvalStatus.Diag = NewDiag;
Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
}
SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
moveFromAndCancel(std::move(Other));
}
SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
maybeRestoreState();
moveFromAndCancel(std::move(Other));
return *this;
}
~SpeculativeEvaluationRAII() { maybeRestoreState(); }
};
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
template<ScopeKind Kind>
class ScopeRAII {
EvalInfo &Info;
unsigned OldStackSize;
public:
ScopeRAII(EvalInfo &Info)
: Info(Info), OldStackSize(Info.CleanupStack.size()) {
// Push a new temporary version. This is needed to distinguish between
// temporaries created in different iterations of a loop.
Info.CurrentCall->pushTempVersion();
}
bool destroy(bool RunDestructors = true) {
bool OK = cleanup(Info, RunDestructors, OldStackSize);
OldStackSize = -1U;
return OK;
}
~ScopeRAII() {
if (OldStackSize != -1U)
destroy(false);
// Body moved to a static method to encourage the compiler to inline away
// instances of this class.
Info.CurrentCall->popTempVersion();
}
private:
static bool cleanup(EvalInfo &Info, bool RunDestructors,
unsigned OldStackSize) {
assert(OldStackSize <= Info.CleanupStack.size() &&
"running cleanups out of order?");
// Run all cleanups for a block scope, and non-lifetime-extended cleanups
// for a full-expression scope.
bool Success = true;
for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(Kind)) {
if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
Success = false;
break;
}
}
}
// Compact any retained cleanups.
auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
if (Kind != ScopeKind::Block)
NewEnd =
std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
return C.isDestroyedAtEndOf(Kind);
});
Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
return Success;
}
};
typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
}
bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
CheckSubobjectKind CSK) {
if (Invalid)
return false;
if (isOnePastTheEnd()) {
Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
<< CSK;
setInvalid();
return false;
}
// Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there
// must actually be at least one array element; even a VLA cannot have a
// bound of zero. And if our index is nonzero, we already had a CCEDiag.
return true;
}
void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info,
const Expr *E) {
Info.CCEDiag(E, diag::note_constexpr_unsized_array_indexed);
// Do not set the designator as invalid: we can represent this situation,
// and correct handling of __builtin_object_size requires us to do so.
}
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
const Expr *E,
const APSInt &N) {
// If we're complaining, we must be able to statically determine the size of
// the most derived array.
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< N << /*array*/ 0
<< static_cast<unsigned>(getMostDerivedArraySize());
else
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< N << /*non-array*/ 1;
setInvalid();
}
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
CallRef Call)
: Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
Arguments(Call), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
CallStackFrame::~CallStackFrame() {
assert(Info.CurrentCall == this && "calls retired out of order");
--Info.CallStackDepth;
Info.CurrentCall = Caller;
}
static bool isRead(AccessKinds AK) {
return AK == AK_Read || AK == AK_ReadObjectRepresentation;
}
static bool isModification(AccessKinds AK) {
switch (AK) {
case AK_Read:
case AK_ReadObjectRepresentation:
case AK_MemberCall:
case AK_DynamicCast:
case AK_TypeId:
return false;
case AK_Assign:
case AK_Increment:
case AK_Decrement:
case AK_Construct:
case AK_Destroy:
return true;
}
llvm_unreachable("unknown access kind");
}
static bool isAnyAccess(AccessKinds AK) {
return isRead(AK) || isModification(AK);
}
/// Is this an access per the C++ definition?
static bool isFormalAccess(AccessKinds AK) {
return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy;
}
/// Is this kind of access valid on an indeterminate object value?
static bool isValidIndeterminateAccess(AccessKinds AK) {
switch (AK) {
case AK_Read:
case AK_Increment:
case AK_Decrement:
// These need the object's value.
return false;
case AK_ReadObjectRepresentation:
case AK_Assign:
case AK_Construct:
case AK_Destroy:
// Construction and destruction don't need the value.
return true;
case AK_MemberCall:
case AK_DynamicCast:
case AK_TypeId:
// These aren't really meaningful on scalars.
return true;
}
llvm_unreachable("unknown access kind");
}
namespace {
struct ComplexValue {
private:
bool IsInt;
public:
APSInt IntReal, IntImag;
APFloat FloatReal, FloatImag;
ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
void makeComplexFloat() { IsInt = false; }
bool isComplexFloat() const { return !IsInt; }
APFloat &getComplexFloatReal() { return FloatReal; }
APFloat &getComplexFloatImag() { return FloatImag; }
void makeComplexInt() { IsInt = true; }
bool isComplexInt() const { return IsInt; }
APSInt &getComplexIntReal() { return IntReal; }
APSInt &getComplexIntImag() { return IntImag; }
void moveInto(APValue &v) const {
if (isComplexFloat())
v = APValue(FloatReal, FloatImag);
else
v = APValue(IntReal, IntImag);
}
void setFrom(const APValue &v) {
assert(v.isComplexFloat() || v.isComplexInt());
if (v.isComplexFloat()) {
makeComplexFloat();
FloatReal = v.getComplexFloatReal();
FloatImag = v.getComplexFloatImag();
} else {
makeComplexInt();
IntReal = v.getComplexIntReal();
IntImag = v.getComplexIntImag();
}
}
};
struct LValue {
APValue::LValueBase Base;
CharUnits Offset;
SubobjectDesignator Designator;
bool IsNullPtr : 1;
bool InvalidBase : 1;
const APValue::LValueBase getLValueBase() const { return Base; }
CharUnits &getLValueOffset() { return Offset; }
const CharUnits &getLValueOffset() const { return Offset; }
SubobjectDesignator &getLValueDesignator() { return Designator; }
const SubobjectDesignator &getLValueDesignator() const { return Designator;}
bool isNullPointer() const { return IsNullPtr;}
unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
unsigned getLValueVersion() const { return Base.getVersion(); }
void moveInto(APValue &V) const {
if (Designator.Invalid)
V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
else {
assert(!InvalidBase && "APValues can't handle invalid LValue bases");
V = APValue(Base, Offset, Designator.Entries,
Designator.IsOnePastTheEnd, IsNullPtr);
}
}
void setFrom(ASTContext &Ctx, const APValue &V) {
assert(V.isLValue() && "Setting LValue from a non-LValue?");
Base = V.getLValueBase();
Offset = V.getLValueOffset();
InvalidBase = false;
Designator = SubobjectDesignator(Ctx, V);
IsNullPtr = V.isNullPointer();
}
void set(APValue::LValueBase B, bool BInvalid = false) {
#ifndef NDEBUG
// We only allow a few types of invalid bases. Enforce that here.
if (BInvalid) {
const auto *E = B.get<const Expr *>();
assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
"Unexpected type of invalid base");
}
#endif
Base = B;
Offset = CharUnits::fromQuantity(0);
InvalidBase = BInvalid;
Designator = SubobjectDesignator(getType(B));
IsNullPtr = false;
}
void setNull(ASTContext &Ctx, QualType PointerTy) {
Base = (const ValueDecl *)nullptr;
Offset =
CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
InvalidBase = false;
Designator = SubobjectDesignator(PointerTy->getPointeeType());
IsNullPtr = true;
}
void setInvalid(APValue::LValueBase B, unsigned I = 0) {
set(B, true);
}
std::string toString(ASTContext &Ctx, QualType T) const {
APValue Printable;
moveInto(Printable);
return Printable.getAsString(Ctx, T);
}
private:
// Check that this LValue is not based on a null pointer. If it is, produce
// a diagnostic and mark the designator as invalid.
template <typename GenDiagType>
bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
if (Designator.Invalid)
return false;
if (IsNullPtr) {
GenDiag();
Designator.setInvalid();
return false;
}
return true;
}
public:
bool checkNullPointer(EvalInfo &Info, const Expr *E,
CheckSubobjectKind CSK) {
return checkNullPointerDiagnosingWith([&Info, E, CSK] {
Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
});
}
bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
AccessKinds AK) {
return checkNullPointerDiagnosingWith([&Info, E, AK] {
Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
});
}
// Check that this LValue refers to an object. If not, set the designator to be
// invalid and emit a diagnostic.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
Designator.checkSubobject(Info, E, CSK);
}
void addDecl(EvalInfo &Info, const Expr *E,
const Decl *D, bool Virtual = false) {
if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
Designator.addDeclUnchecked(D, Virtual);
}
void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
if (!Designator.Entries.empty()) {
Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array);
Designator.setInvalid();
return;
}
if (checkSubobject(Info, E, CSK_ArrayToPointer)) {
assert(getType(Base)->isPointerType() || getType(Base)->isArrayType());
Designator.FirstEntryIsAnUnsizedArray = true;
Designator.addUnsizedArrayUnchecked(ElemTy);
}
}
void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
if (checkSubobject(Info, E, CSK_ArrayToPointer))
Designator.addArrayUnchecked(CAT);
}
void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
Designator.addComplexUnchecked(EltTy, Imag);
}
void clearIsNullPointer() {
IsNullPtr = false;
}
void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
const APSInt &Index, CharUnits ElementSize) {
// An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
// but we're not required to diagnose it and it's valid in C++.)
if (!Index)
return;
// Compute the new offset in the appropriate width, wrapping at 64 bits.
// FIXME: When compiling for a 32-bit target, we should use 32-bit
// offsets.
uint64_t Offset64 = Offset.getQuantity();
uint64_t ElemSize64 = ElementSize.getQuantity();
uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
if (checkNullPointer(Info, E, CSK_ArrayIndex))
Designator.adjustIndex(Info, E, Index);
clearIsNullPointer();
}
void adjustOffset(CharUnits N) {
Offset += N;
if (N.getQuantity())
clearIsNullPointer();
}
};
struct MemberPtr {
MemberPtr() {}
explicit MemberPtr(const ValueDecl *Decl) :
DeclAndIsDerivedMember(Decl, false), Path() {}
/// The member or (direct or indirect) field referred to by this member
/// pointer, or 0 if this is a null member pointer.
const ValueDecl *getDecl() const {
return DeclAndIsDerivedMember.getPointer();
}
/// Is this actually a member of some type derived from the relevant class?
bool isDerivedMember() const {
return DeclAndIsDerivedMember.getInt();
}
/// Get the class which the declaration actually lives in.
const CXXRecordDecl *getContainingRecord() const {
return cast<CXXRecordDecl>(
DeclAndIsDerivedMember.getPointer()->getDeclContext());
}
void moveInto(APValue &V) const {
V = APValue(getDecl(), isDerivedMember(), Path);
}
void setFrom(const APValue &V) {
assert(V.isMemberPointer());
DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
Path.clear();
ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
Path.insert(Path.end(), P.begin(), P.end());
}
/// DeclAndIsDerivedMember - The member declaration, and a flag indicating
/// whether the member is a member of some class derived from the class type
/// of the member pointer.
llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
/// Path - The path of base/derived classes from the member declaration's
/// class (exclusive) to the class type of the member pointer (inclusive).
SmallVector<const CXXRecordDecl*, 4> Path;
/// Perform a cast towards the class of the Decl (either up or down the
/// hierarchy).
bool castBack(const CXXRecordDecl *Class) {
assert(!Path.empty());
const CXXRecordDecl *Expected;
if (Path.size() >= 2)
Expected = Path[Path.size() - 2];
else
Expected = getContainingRecord();
if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
// C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
// if B does not contain the original member and is not a base or
// derived class of the class containing the original member, the result
// of the cast is undefined.
// C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
// (D::*). We consider that to be a language defect.
return false;
}
Path.pop_back();
return true;
}
/// Perform a base-to-derived member pointer cast.
bool castToDerived(const CXXRecordDecl *Derived) {
if (!getDecl())
return true;
if (!isDerivedMember()) {
Path.push_back(Derived);
return true;
}
if (!castBack(Derived))
return false;
if (Path.empty())
DeclAndIsDerivedMember.setInt(false);
return true;
}
/// Perform a derived-to-base member pointer cast.
bool castToBase(const CXXRecordDecl *Base) {
if (!getDecl())
return true;
if (Path.empty())
DeclAndIsDerivedMember.setInt(true);
if (isDerivedMember()) {
Path.push_back(Base);
return true;
}
return castBack(Base);
}
};
/// Compare two member pointers, which are assumed to be of the same type.
static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
if (!LHS.getDecl() || !RHS.getDecl())
return !LHS.getDecl() && !RHS.getDecl();
if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
return false;
return LHS.Path == RHS.Path;
}
}
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
const LValue &This, const Expr *E,
bool AllowNonLiteralTypes = false);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK = false);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK = false);
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info);
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
EvalInfo &Info);
/// Evaluate an integer or fixed point expression into an APFixedPoint result.
static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
EvalInfo &Info);
/// Evaluate only a fixed point expression into an APFixedPoint result.
static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
EvalInfo &Info);
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
/// Negate an APSInt in place, converting it to a signed form if necessary, and
/// preserving its value (by extending by up to one bit as needed).
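/// For example, negating the 8-bit unsigned value 255 produces -255 in a
/// 9-bit signed APSInt, and negating the most negative 8-bit signed value
/// (-128) produces +128 in 9 bits.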
static void negateAsSigned(APSInt &Int) {
if (Int.isUnsigned() || Int.isMinSignedValue()) {
Int = Int.extend(Int.getBitWidth() + 1);
Int.setIsSigned(true);
}
Int = -Int;
}
template<typename KeyT>
APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
ScopeKind Scope, LValue &LV) {
unsigned Version = getTempVersion();
APValue::LValueBase Base(Key, Index, Version);
LV.set(Base);
return createLocal(Base, Key, T, Scope);
}
/// Allocate storage for a parameter of a function call made in this frame.
APValue &CallStackFrame::createParam(CallRef Args, const ParmVarDecl *PVD,
LValue &LV) {
assert(Args.CallIndex == Index && "creating parameter in wrong frame");
APValue::LValueBase Base(PVD, Index, Args.Version);
LV.set(Base);
// We always destroy parameters at the end of the call, even if we'd allow
// them to live to the end of the full-expression at runtime, in order to
// give portable results and match other compilers.
return createLocal(Base, PVD, PVD->getType(), ScopeKind::Call);
}
APValue &CallStackFrame::createLocal(APValue::LValueBase Base, const void *Key,
QualType T, ScopeKind Scope) {
assert(Base.getCallIndex() == Index && "lvalue for wrong frame");
unsigned Version = Base.getVersion();
APValue &Result = Temporaries[MapKeyTy(Key, Version)];
assert(Result.isAbsent() && "local created multiple times");
// If we're creating a local immediately in the operand of a speculative
// evaluation, don't register a cleanup to be run outside the speculative
// evaluation context, since we won't actually be able to initialize this
// object.
if (Index <= Info.SpeculativeEvaluationDepth) {
if (T.isDestructedType())
Info.noteSideEffect();
} else {
Info.CleanupStack.push_back(Cleanup(&Result, Base, T, Scope));
}
return Result;
}
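/// Allocate storage for an object dynamically created within this evaluation
/// (for example, by a new-expression), failing with a diagnostic if the
/// implementation limit on the number of such allocations is exceeded.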
APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) {
FFDiag(E, diag::note_constexpr_heap_alloc_limit_exceeded);
return nullptr;
}
DynamicAllocLValue DA(NumHeapAllocs++);
LV.set(APValue::LValueBase::getDynamicAlloc(DA, T));
auto Result = HeapAllocs.emplace(std::piecewise_construct,
std::forward_as_tuple(DA), std::tuple<>());
assert(Result.second && "reused a heap alloc index?");
Result.first->second.AllocExpr = E;
return &Result.first->second.Value;
}
/// Produce a string describing the given constexpr call.
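/// The result has the form 'callee(arg0, arg1, ...)' or, for a non-static
/// member call, 'object->callee(arg0, ...)', printing '<...>' for any
/// argument whose value is unavailable.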
void CallStackFrame::describe(raw_ostream &Out) {
unsigned ArgIndex = 0;
bool IsMemberCall = isa<CXXMethodDecl>(Callee) &&
!isa<CXXConstructorDecl>(Callee) &&
cast<CXXMethodDecl>(Callee)->isInstance();
if (!IsMemberCall)
Out << *Callee << '(';
if (This && IsMemberCall) {
APValue Val;
This->moveInto(Val);
Val.printPretty(Out, Info.Ctx,
This->Designator.MostDerivedType);
// FIXME: Add parens around Val if needed.
Out << "->" << *Callee << '(';
IsMemberCall = false;
}
for (FunctionDecl::param_const_iterator I = Callee->param_begin(),
E = Callee->param_end(); I != E; ++I, ++ArgIndex) {
if (ArgIndex > (unsigned)IsMemberCall)
Out << ", ";
const ParmVarDecl *Param = *I;
APValue *V = Info.getParamSlot(Arguments, Param);
if (V)
V->printPretty(Out, Info.Ctx, Param->getType());
else
Out << "<...>";
if (ArgIndex == 0 && IsMemberCall)
Out << "->" << *Callee << '(';
}
Out << ')';
}
/// Evaluate an expression to see if it had side-effects, and discard its
/// result.
/// \return \c true if the caller should keep evaluating.
static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
assert(!E->isValueDependent());
APValue Scratch;
if (!Evaluate(Scratch, Info, E))
// We don't need the value, but we might have skipped a side effect here.
return Info.noteSideEffect();
return true;
}
/// Should this call expression be treated as a string literal?
static bool IsStringLiteralCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
}
static bool IsGlobalLValue(APValue::LValueBase B) {
// C++11 [expr.const]p3 An address constant expression is a prvalue core
// constant expression of pointer type that evaluates to...
// ... a null pointer value, or a prvalue core constant expression of type
// std::nullptr_t.
if (!B) return true;
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// ... the address of an object with static storage duration,
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
if (isa<TemplateParamObjectDecl>(D))
return true;
// ... the address of a function,
// ... the address of a GUID [MS extension],
return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
}
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
return true;
const Expr *E = B.get<const Expr*>();
switch (E->getStmtClass()) {
default:
return false;
case Expr::CompoundLiteralExprClass: {
const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
return CLE->isFileScope() && CLE->isLValue();
}
case Expr::MaterializeTemporaryExprClass:
// A materialized temporary might have been lifetime-extended to static
// storage duration.
return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
// A string literal has static storage duration.
case Expr::StringLiteralClass:
case Expr::PredefinedExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCEncodeExprClass:
return true;
case Expr::ObjCBoxedExprClass:
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
case Expr::CallExprClass:
return IsStringLiteralCall(cast<CallExpr>(E));
// For GCC compatibility, &&label has static storage duration.
case Expr::AddrLabelExprClass:
return true;
// A Block literal expression may be used as the initialization value for
// Block variables at global or local static scope.
case Expr::BlockExprClass:
return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
case Expr::ImplicitValueInitExprClass:
// FIXME:
// We can never form an lvalue with an implicit value initialization as its
// base through expression evaluation, so these only appear in one case: the
// implicit variable declaration we invent when checking whether a constexpr
// constructor can produce a constant expression. We must assume that such
// an expression might be a global lvalue.
return true;
}
}
static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
return LVal.Base.dyn_cast<const ValueDecl*>();
}
static bool IsLiteralLValue(const LValue &Value) {
if (Value.getLValueCallIndex())
return false;
const Expr *E = Value.Base.dyn_cast<const Expr*>();
return E && !isa<MaterializeTemporaryExpr>(E);
}
static bool IsWeakLValue(const LValue &Value) {
const ValueDecl *Decl = GetLValueBaseDecl(Value);
return Decl && Decl->isWeak();
}
static bool isZeroSized(const LValue &Value) {
const ValueDecl *Decl = GetLValueBaseDecl(Value);
if (Decl && isa<VarDecl>(Decl)) {
QualType Ty = Decl->getType();
if (Ty->isArrayType())
return Ty->isIncompleteType() ||
Decl->getASTContext().getTypeSize(Ty) == 0;
}
return false;
}
static bool HasSameBase(const LValue &A, const LValue &B) {
if (!A.getLValueBase())
return !B.getLValueBase();
if (!B.getLValueBase())
return false;
if (A.getLValueBase().getOpaqueValue() !=
B.getLValueBase().getOpaqueValue())
return false;
return A.getLValueCallIndex() == B.getLValueCallIndex() &&
A.getLValueVersion() == B.getLValueVersion();
}
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
assert(Base && "no location for a null lvalue");
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
// For a parameter, find the corresponding call stack frame (if it still
// exists), and point at the parameter of the function definition we actually
// invoked.
if (auto *PVD = dyn_cast_or_null<ParmVarDecl>(VD)) {
unsigned Idx = PVD->getFunctionScopeIndex();
for (CallStackFrame *F = Info.CurrentCall; F; F = F->Caller) {
if (F->Arguments.CallIndex == Base.getCallIndex() &&
F->Arguments.Version == Base.getVersion() && F->Callee &&
Idx < F->Callee->getNumParams()) {
VD = F->Callee->getParamDecl(Idx);
break;
}
}
}
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
else if (const Expr *E = Base.dyn_cast<const Expr*>())
Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
// FIXME: Produce a note for dangling pointers too.
if (Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA))
Info.Note((*Alloc)->AllocExpr->getExprLoc(),
diag::note_constexpr_dynamic_alloc_here);
}
// We have no information to show for a typeid(T) object.
}
enum class CheckEvaluationResultKind {
ConstantExpression,
FullyInitialized,
};
/// Materialized temporaries that we've already checked to determine if they're
/// initialized by a constant expression.
using CheckedTemporaries =
llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>;
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
SourceLocation SubobjectLoc,
CheckedTemporaries &CheckedTemps);
/// Check that this reference or pointer core constant expression is a valid
/// value for an address or reference constant expression. Return true if we
/// can fold this expression, whether or not it's a constant expression.
static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
QualType Type, const LValue &LVal,
ConstantExprKind Kind,
CheckedTemporaries &CheckedTemps) {
bool IsReferenceType = Type->isReferenceType();
APValue::LValueBase Base = LVal.getLValueBase();
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
const Expr *BaseE = Base.dyn_cast<const Expr *>();
const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>();
// Additional restrictions apply in a template argument. We only enforce the
// C++20 restrictions here; additional syntactic and semantic restrictions
// are applied elsewhere.
if (isTemplateArgument(Kind)) {
int InvalidBaseKind = -1;
StringRef Ident;
if (Base.is<TypeInfoLValue>())
InvalidBaseKind = 0;
else if (isa_and_nonnull<StringLiteral>(BaseE))
InvalidBaseKind = 1;
else if (isa_and_nonnull<MaterializeTemporaryExpr>(BaseE) ||
isa_and_nonnull<LifetimeExtendedTemporaryDecl>(BaseVD))
InvalidBaseKind = 2;
else if (auto *PE = dyn_cast_or_null<PredefinedExpr>(BaseE)) {
InvalidBaseKind = 3;
Ident = PE->getIdentKindName();
}
if (InvalidBaseKind != -1) {
Info.FFDiag(Loc, diag::note_constexpr_invalid_template_arg)
<< IsReferenceType << !Designator.Entries.empty() << InvalidBaseKind
<< Ident;
return false;
}
}
if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD)) {
if (FD->isConsteval()) {
Info.FFDiag(Loc, diag::note_consteval_address_accessible)
<< !Type->isAnyPointerType();
Info.Note(FD->getLocation(), diag::note_declared_at);
return false;
}
}
// Check that the object is a global. Note that the fake 'this' object we
// manufacture when checking potential constant expressions is conservatively
// assumed to be global here.
if (!IsGlobalLValue(Base)) {
if (Info.getLangOpts().CPlusPlus11) {
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
<< IsReferenceType << !Designator.Entries.empty()
<< !!VD << VD;
auto *VarD = dyn_cast_or_null<VarDecl>(VD);
if (VarD && VarD->isConstexpr()) {
// Non-static local constexpr variables have unintuitive semantics:
// constexpr int a = 1;
// constexpr const int *p = &a;
// ... is invalid because the address of 'a' is not constant. Suggest
// adding a 'static' in this case.
Info.Note(VarD->getLocation(), diag::note_constexpr_not_static)
<< VarD
<< FixItHint::CreateInsertion(VarD->getBeginLoc(), "static ");
} else {
NoteLValueLocation(Info, Base);
}
} else {
Info.FFDiag(Loc);
}
// Don't allow references to temporaries to escape.
return false;
}
assert((Info.checkingPotentialConstantExpression() ||
LVal.getLValueCallIndex() == 0) &&
"have call index for global lvalue");
if (Base.is<DynamicAllocLValue>()) {
Info.FFDiag(Loc, diag::note_constexpr_dynamic_alloc)
<< IsReferenceType << !Designator.Entries.empty();
NoteLValueLocation(Info, Base);
return false;
}
if (BaseVD) {
if (const VarDecl *Var = dyn_cast<const VarDecl>(BaseVD)) {
// Check if this is a thread-local variable.
if (Var->getTLSKind())
// FIXME: Diagnostic!
return false;
// A dllimport variable never acts like a constant, unless we're
// evaluating a value for use only in name mangling.
if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
}
if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) {
// __declspec(dllimport) must be handled very carefully:
// We must never initialize an expression with the thunk in C++.
// Doing otherwise would allow the same id-expression to yield
// different addresses for the same function in different translation
// units. However, this means that we must dynamically initialize the
// expression with the contents of the import address table at runtime.
//
// The C language has no notion of ODR; furthermore, it has no notion of
// dynamic initialization. This means that we are permitted to
// perform initialization with the address of the thunk.
if (Info.getLangOpts().CPlusPlus && !isForManglingOnly(Kind) &&
FD->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
}
} else if (const auto *MTE =
dyn_cast_or_null<MaterializeTemporaryExpr>(BaseE)) {
if (CheckedTemps.insert(MTE).second) {
QualType TempType = getType(Base);
if (TempType.isDestructedType()) {
Info.FFDiag(MTE->getExprLoc(),
diag::note_constexpr_unsupported_temporary_nontrivial_dtor)
<< TempType;
return false;
}
APValue *V = MTE->getOrCreateValue(false);
assert(V && "evasluation result refers to uninitialised temporary");
if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, MTE->getExprLoc(), TempType, *V,
Kind, SourceLocation(), CheckedTemps))
return false;
}
}
// Allow address constant expressions to be past-the-end pointers. This is
// an extension: the standard requires them to point to an object.
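// For example, 'constexpr int arr[2] = {}; constexpr const int *p = arr + 2;'
// is accepted here even though 'arr + 2' does not point to an object.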
if (!IsReferenceType)
return true;
// A reference constant expression must refer to an object.
if (!Base) {
// FIXME: diagnostic
Info.CCEDiag(Loc);
return true;
}
// Does this refer one past the end of some object?
if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
Info.FFDiag(Loc, diag::note_constexpr_past_end, 1)
<< !Designator.Entries.empty() << !!BaseVD << BaseVD;
NoteLValueLocation(Info, Base);
}
return true;
}
/// Member pointers are constant expressions unless they point to a
/// non-virtual dllimport member function.
static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
SourceLocation Loc,
QualType Type,
const APValue &Value,
ConstantExprKind Kind) {
const ValueDecl *Member = Value.getMemberPointerDecl();
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
if (FD->isConsteval()) {
Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
Info.Note(FD->getLocation(), diag::note_declared_at);
return false;
}
return isForManglingOnly(Kind) || FD->isVirtual() ||
!FD->hasAttr<DLLImportAttr>();
}
/// Check that this core constant expression is of literal type, and if not,
/// produce an appropriate diagnostic.
static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
const LValue *This = nullptr) {
if (!E->isPRValue() || E->getType()->isLiteralType(Info.Ctx))
return true;
// C++1y: A constant initializer for an object o [...] may also invoke
// constexpr constructors for o and its subobjects even if those objects
// are of non-literal class types.
//
// C++11 missed this detail for aggregates, so classes like this:
// struct foo_t { union { int i; volatile int j; } u; };
// are not (obviously) initializable like so:
// __attribute__((__require_constant_initialization__))
// static const foo_t x = {{0}};
// because "i" is a subobject with non-literal initialization (due to the
// volatile member of the union). See:
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677
// Therefore, we use the C++1y behavior.
if (This && Info.EvaluatingDecl == This->getLValueBase())
return true;
// Prvalue constant expressions must be of literal types.
if (Info.getLangOpts().CPlusPlus11)
Info.FFDiag(E, diag::note_constexpr_nonliteral)
<< E->getType();
else
Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
SourceLocation SubobjectLoc,
CheckedTemporaries &CheckedTemps) {
if (!Value.hasValue()) {
Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
<< true << Type;
if (SubobjectLoc.isValid())
Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here);
return false;
}
// We allow _Atomic(T) to be initialized from anything that T can be
// initialized from.
if (const AtomicType *AT = Type->getAs<AtomicType>())
Type = AT->getValueType();
// Core issue 1454: For a literal constant expression of array or class type,
// each subobject of its value shall have been initialized by a constant
// expression.
if (Value.isArray()) {
QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayInitializedElt(I), Kind,
SubobjectLoc, CheckedTemps))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayFiller(), Kind, SubobjectLoc,
CheckedTemps);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckEvaluationResult(
CERK, Info, DiagLoc, Value.getUnionField()->getType(),
Value.getUnionValue(), Kind, Value.getUnionField()->getLocation(),
CheckedTemps);
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
Value.getStructBase(BaseIndex), Kind,
BS.getBeginLoc(), CheckedTemps))
return false;
++BaseIndex;
}
}
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitfield())
continue;
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex()),
Kind, I->getLocation(), CheckedTemps))
return false;
}
}
if (Value.isLValue() &&
CERK == CheckEvaluationResultKind::ConstantExpression) {
LValue LVal;
LVal.setFrom(Info.Ctx, Value);
return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Kind,
CheckedTemps);
}
if (Value.isMemberPointer() &&
CERK == CheckEvaluationResultKind::ConstantExpression)
return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Kind);
// Everything else is fine.
return true;
}
/// Check that this core constant expression value is a valid value for a
/// constant expression. If not, report an appropriate diagnostic. Does not
/// check that the expression is of literal type.
static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind) {
// Nothing to check for a constant expression of type 'cv void'.
if (Type->isVoidType())
return true;
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, DiagLoc, Type, Value, Kind,
SourceLocation(), CheckedTemps);
}
/// Check that this evaluated value is fully-initialized and can be loaded by
/// an lvalue-to-rvalue conversion.
static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value) {
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(
CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
ConstantExprKind::Normal, SourceLocation(), CheckedTemps);
}
/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
/// "the allocated storage is deallocated within the evaluation".
static bool CheckMemoryLeaks(EvalInfo &Info) {
if (!Info.HeapAllocs.empty()) {
// We can still fold to a constant despite a compile-time memory leak,
// so long as the heap allocation isn't referenced in the result (we check
// that in CheckConstantExpression).
Info.CCEDiag(Info.HeapAllocs.begin()->second.AllocExpr,
diag::note_constexpr_memory_leak)
<< unsigned(Info.HeapAllocs.size() - 1);
}
return true;
}
static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
// A null base expression indicates a null pointer. These are always
// evaluatable, and they are false unless the offset is nonzero.
if (!Value.getLValueBase()) {
Result = !Value.getLValueOffset().isZero();
return true;
}
// We have a non-null base. These are generally known to be true, but if it's
// a weak declaration it can be null at runtime.
Result = true;
const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
return !Decl || !Decl->isWeak();
}
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
switch (Val.getKind()) {
case APValue::None:
case APValue::Indeterminate:
return false;
case APValue::Int:
Result = Val.getInt().getBoolValue();
return true;
case APValue::FixedPoint:
Result = Val.getFixedPoint().getBoolValue();
return true;
case APValue::Float:
Result = !Val.getFloat().isZero();
return true;
case APValue::ComplexInt:
Result = Val.getComplexIntReal().getBoolValue() ||
Val.getComplexIntImag().getBoolValue();
return true;
case APValue::ComplexFloat:
Result = !Val.getComplexFloatReal().isZero() ||
!Val.getComplexFloatImag().isZero();
return true;
case APValue::LValue:
return EvalPointerValueAsBool(Val, Result);
case APValue::MemberPointer:
Result = Val.getMemberPointerDecl();
return true;
case APValue::Vector:
case APValue::Array:
case APValue::Struct:
case APValue::Union:
case APValue::AddrLabelDiff:
return false;
}
llvm_unreachable("unknown APValue kind");
}
static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
assert(E->isPRValue() && "missing lvalue-to-rvalue conv in bool condition");
APValue Val;
if (!Evaluate(Val, Info, E))
return false;
return HandleConversionToBool(Val, Result);
}
template<typename T>
static bool HandleOverflow(EvalInfo &Info, const Expr *E,
const T &SrcValue, QualType DestType) {
Info.CCEDiag(E, diag::note_constexpr_overflow)
<< SrcValue << DestType;
return Info.noteUndefinedBehavior();
}
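/// Cast a floating-point value to an integer type, truncating toward zero. A
/// source value that is out of range for the destination type (or a NaN)
/// makes the conversion invalid and is diagnosed as overflow.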
static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
QualType SrcType, const APFloat &Value,
QualType DestType, APSInt &Result) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
// Determine whether we are converting to unsigned or signed.
bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
Result = APSInt(DestWidth, !DestSigned);
bool ignored;
if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
& APFloat::opInvalidOp)
return HandleOverflow(Info, E, Value, DestType);
return true;
}
/// Get rounding mode used for evaluation of the specified expression.
/// \param[out] DynamicRM Is set to true if the requested rounding mode is
/// dynamic.
/// If the rounding mode is unknown at compile time, we still try to evaluate
/// the expression: if the result is exact, it does not depend on the rounding
/// mode, so "tonearest" is returned instead of "dynamic".
static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E,
bool &DynamicRM) {
llvm::RoundingMode RM =
E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode();
DynamicRM = (RM == llvm::RoundingMode::Dynamic);
if (DynamicRM)
RM = llvm::RoundingMode::NearestTiesToEven;
return RM;
}
/// Check if the given evaluation result is allowed for constant evaluation.
static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
APFloat::opStatus St) {
// In a constant context, assume that any dynamic rounding mode or FP
// exception state matches the default floating-point environment.
if (Info.InConstantContext)
return true;
FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
if ((St & APFloat::opInexact) &&
FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
// An inexact result means the value depends on the rounding mode. If the
// requested mode is dynamic, the evaluation cannot be performed at compile
// time.
Info.FFDiag(E, diag::note_constexpr_dynamic_rounding);
return false;
}
if ((St != APFloat::opOK) &&
(FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
FPO.getFPExceptionMode() != LangOptions::FPE_Ignore ||
FPO.getAllowFEnvAccess())) {
Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
return false;
}
if ((St & APFloat::opStatus::opInvalidOp) &&
FPO.getFPExceptionMode() != LangOptions::FPE_Ignore) {
// There is no usefully definable result.
Info.FFDiag(E);
return false;
}
// FIXME: if:
// - the evaluation triggered another FP exception, and
// - the exception mode is not "ignore", and
// - the expression being evaluated is not part of a global variable
//   initializer,
// the evaluation probably needs to be rejected.
return true;
}
static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E));
bool DynamicRM;
llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
APFloat::opStatus St;
APFloat Value = Result;
bool ignored;
St = Result.convert(Info.Ctx.getFloatTypeSemantics(DestType), RM, &ignored);
return checkFloatingPointResult(Info, E, St);
}
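/// Cast an integer value to an integer type of possibly different width and
/// signedness. For example, casting the value 300 to 'unsigned char' yields
/// 44, and casting any nonzero value to 'bool' yields 1.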
static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
QualType DestType, QualType SrcType,
const APSInt &Value) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
APSInt Result = Value.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
if (DestType->isBooleanType())
Result = Value.getBoolValue();
return Result;
}
static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
const FPOptions FPO,
QualType SrcType, const APSInt &Value,
QualType DestType, APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(),
APFloat::rmNearestTiesToEven);
if (!Info.InConstantContext && St != llvm::APFloatBase::opOK &&
FPO.isFPConstrained()) {
Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
return false;
}
return true;
}
static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
APValue &Value, const FieldDecl *FD) {
assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
if (!Value.isInt()) {
// Trying to store a pointer-cast-to-integer into a bitfield.
// FIXME: In this case, we should provide the diagnostic for casting
// a pointer to an integer.
assert(Value.isLValue() && "integral value neither int nor lvalue?");
Info.FFDiag(E);
return false;
}
APSInt &Int = Value.getInt();
unsigned OldBitWidth = Int.getBitWidth();
unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx);
if (NewBitWidth < OldBitWidth)
Int = Int.trunc(NewBitWidth).extend(OldBitWidth);
return true;
}
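/// Evaluate an expression of integer, floating-point, or vector type and
/// produce its object representation as a single APInt of the type's full
/// width, packing vector elements in the target's endianness.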
static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
llvm::APInt &Res) {
APValue SVal;
if (!Evaluate(SVal, Info, E))
return false;
if (SVal.isInt()) {
Res = SVal.getInt();
return true;
}
if (SVal.isFloat()) {
Res = SVal.getFloat().bitcastToAPInt();
return true;
}
if (SVal.isVector()) {
QualType VecTy = E->getType();
unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
Res = llvm::APInt::getZero(VecSize);
for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
APValue &Elt = SVal.getVectorElt(i);
llvm::APInt EltAsInt;
if (Elt.isInt()) {
EltAsInt = Elt.getInt();
} else if (Elt.isFloat()) {
EltAsInt = Elt.getFloat().bitcastToAPInt();
} else {
// Don't try to handle vectors of anything other than int or float
// (not sure if it's possible to hit this case).
Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
unsigned BaseEltSize = EltAsInt.getBitWidth();
if (BigEndian)
Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
else
Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
}
return true;
}
// Give up if the input isn't an int, float, or vector. For example, we
// reject "(v4i16)(intptr_t)&a".
Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
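/// The operation is evaluated in BitWidth bits; if truncating the result back
/// to the operands' width changes its value, the (signed) operation
/// overflowed.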
template<typename Operation>
static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &LHS, const APSInt &RHS,
unsigned BitWidth, Operation Op,
APSInt &Result) {
if (LHS.isUnsigned()) {
Result = Op(LHS, RHS);
return true;
}
APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
Result = Value.trunc(LHS.getBitWidth());
if (Result.extend(BitWidth) != Value) {