blob: 5fe1d9a4f2b76d4859b0b3918a5eef262bc0709b [file] [log] [blame] [edit]
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//
#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
#include "CIRGenBuilder.h"
#include "CIRGenCall.h"
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"
#include "EHScopeStack.h"
#include "Address.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/BaseSubobject.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include "clang/CIR/TypeEvaluationKind.h"
#include "llvm/ADT/ScopedHashTable.h"
namespace {
class ScalarExprEmitter;
} // namespace
namespace mlir {
namespace acc {
class LoopOp;
} // namespace acc
} // namespace mlir
namespace clang::CIRGen {
struct CGCoroData;
class CIRGenFunction : public CIRGenTypeCache {
public:
CIRGenModule &cgm;
private:
friend class ::ScalarExprEmitter;
/// The builder is a helper class to create IR inside a function. The
/// builder is stateful, in particular it keeps an "insertion point": this
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
  JumpDest() = default;
  // Fix: the original constructor accepted `depth` and `index` but never
  // stored them, leaving scopeDepth default-constructed and index
  // indeterminate even though getScopeDepth()/getDestIndex() read them.
  JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
           unsigned index = 0)
      : block(block), scopeDepth(depth), index(index) {}

  bool isValid() const { return block != nullptr; }
  mlir::Block *getBlock() const { return block; }
  EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
  unsigned getDestIndex() const { return index; }

  // This should be used cautiously.
  void setScopeDepth(EHScopeStack::stable_iterator depth) {
    scopeDepth = depth;
  }

private:
  mlir::Block *block = nullptr;
  EHScopeStack::stable_iterator scopeDepth;
  // In-class initializer so the defaulted constructor cannot leave this
  // indeterminate.
  unsigned index = 0;
};
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
/// Unified return block.
/// In CIR this is a function because each scope might have
/// its associated return block.
/// Wraps \p retBlock as a JumpDest registered against the innermost normal
/// cleanup of the current scope (see getJumpDestInCurrentScope).
JumpDest returnBlock(mlir::Block *retBlock) {
  return getJumpDestInCurrentScope(retBlock);
}
unsigned nextCleanupDestIndex = 1;
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
// Holds coroutine data if the current function is a coroutine. We use a
// wrapper to manage its lifetime, so that we don't have to define CGCoroData
// in this header.
struct CGCoroInfo {
  // Owning pointer to the coroutine state. CGCoroData is an incomplete type
  // here, so the constructor/destructor must be defined out of line, where
  // the type is complete.
  std::unique_ptr<CGCoroData> data;
  CGCoroInfo();
  ~CGCoroInfo();
};
CGCoroInfo curCoro;
// A function is treated as a coroutine iff coroutine data was allocated.
bool isCoroutine() const { return curCoro.data != nullptr; }
/// The temporary alloca to hold the return value. This is
/// invalid iff the function has no return value.
Address returnValue = Address::invalid();
/// Tracks function scope overall cleanup handling.
EHScopeStack ehStack;
GlobalDecl curSEHParent;
/// A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
lambdaCaptureFields;
clang::FieldDecl *lambdaThisCaptureField = nullptr;
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *cxxabiThisDecl = nullptr;
mlir::Value cxxabiThisValue = nullptr;
mlir::Value cxxThisValue = nullptr;
clang::CharUnits cxxThisAlignment;
/// When generating code for a constructor or destructor, this will hold the
/// implicit argument (e.g. VTT).
ImplicitParamDecl *cxxStructorImplicitParamDecl{};
mlir::Value cxxStructorImplicitParamValue{};
/// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
/// expression.
Address cxxDefaultInitExprThis = Address::invalid();
// Holds the Decl for the current outermost non-closure context
const clang::Decl *curFuncDecl = nullptr;
/// This is the inner-most code context, which includes blocks.
const clang::Decl *curCodeDecl = nullptr;
/// The current function or global initializer that is generated code for.
/// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
/// global initializers.
mlir::Operation *curFn = nullptr;
/// Save Parameter Decl for coroutine.
llvm::SmallVector<const ParmVarDecl *> fnArgs;
using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
/// This keeps track of the CIR allocas or globals for local C
/// declarations.
DeclMapTy localDeclMap;
/// The type of the condition for the emitting switch statement.
llvm::SmallVector<mlir::Type, 2> condTypeStack;
clang::ASTContext &getContext() const { return cgm.getASTContext(); }
CIRGenBuilderTy &getBuilder() { return builder; }
CIRGenModule &getCIRGenModule() { return cgm; }
const CIRGenModule &getCIRGenModule() const { return cgm; }
/// Return the entry block of the current cir.FuncOp.
mlir::Block *getCurFunctionEntryBlock() {
  // We currently assume this isn't called for a global initializer.
  return &mlir::cast<cir::FuncOp>(curFn).getRegion().front();
}
/// Sanitizers enabled for this function.
clang::SanitizerSet sanOpts;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated,
/// the scope is destroyed and the mappings created in this scope are
/// dropped.
using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
SymTableTy symbolTable;
/// Whether a cir.stacksave operation has been added. Used to avoid
/// inserting cir.stacksave for multiple VLAs in the same scope.
bool didCallStackSave = false;
/// Whether or not a Microsoft-style asm block has been processed within
/// this function. These can potentially set the return value.
bool sawAsmBlock = false;
mlir::Type convertTypeForMem(QualType t);
mlir::Type convertType(clang::QualType t);
mlir::Type convertType(const TypeDecl *t) {
return convertType(getContext().getTypeDeclType(t));
}
/// Get integer from a mlir::Value that is an int constant or a constant op.
static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
  // The value must be produced by a cir.const; anything else is a caller bug.
  cir::ConstantOp constOp = val.getDefiningOp<cir::ConstantOp>();
  assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
  return constOp.getIntValue().getSExtValue();
}
/// Get zero-extended integer from a mlir::Value that is an int constant or a
/// constant op.
// NOTE(review): getZExtValue() returns uint64_t but this function returns
// int64_t, so zero-extended values above INT64_MAX come back negative —
// confirm whether callers can ever see such values, or whether the return
// type should be uint64_t.
static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
  auto constOp = val.getDefiningOp<cir::ConstantOp>();
  assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
  return constOp.getIntValue().getZExtValue();
}
/// Return the cir::TypeEvaluationKind of QualType \c type.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type);
/// True iff \c type is evaluated as a scalar (TEK_Scalar).
static bool hasScalarEvaluationKind(clang::QualType type) {
  return getEvaluationKind(type) == cir::TEK_Scalar;
}
/// True iff \c type is evaluated as an aggregate (TEK_Aggregate).
static bool hasAggregateEvaluationKind(clang::QualType type) {
  return getEvaluationKind(type) == cir::TEK_Aggregate;
}
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext = false);
~CIRGenFunction();
CIRGenTypes &getTypes() const { return cgm.getTypes(); }
const TargetInfo &getTarget() const { return cgm.getTarget(); }
mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
const TargetCIRGenInfo &getTargetHooks() const {
return cgm.getTargetCIRGenInfo();
}
// ---------------------
// Opaque value handling
// ---------------------
/// Keeps track of the current set of opaque value expressions.
llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
// This keeps track of the associated size for each VLA type.
// We track this by the size expression rather than the type itself because
// in certain situations, like a const qualifier applied to an VLA typedef,
// multiple VLA types can share the same size expression.
// FIXME: Maybe this could be a stack of maps that is pushed/popped as we
// enter/leave scopes.
llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
public:
/// A non-RAII class containing all the information about a bound
/// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
/// this which makes individual mappings very simple; using this
/// class directly is useful when you have a variable number of
/// opaque values or don't want the RAII functionality for some
/// reason.
class OpaqueValueMappingData {
  const OpaqueValueExpr *opaqueValue;
  bool boundLValue;

  OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
      : opaqueValue(ov), boundLValue(boundLValue) {}

public:
  OpaqueValueMappingData() : opaqueValue(nullptr) {}

  static bool shouldBindAsLValue(const Expr *expr) {
    // gl-values should be bound as l-values for obvious reasons.
    // Records should be bound as l-values because IR generation
    // always keeps them in memory. Expressions of function type
    // act exactly like l-values but are formally required to be
    // r-values in C.
    if (expr->isGLValue() || expr->getType()->isFunctionType())
      return true;
    return hasAggregateEvaluationKind(expr->getType());
  }

  /// Bind \p ov by emitting \p e, picking l-value or r-value binding based
  /// on shouldBindAsLValue.
  static OpaqueValueMappingData
  bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
    return shouldBindAsLValue(ov) ? bind(cgf, ov, cgf.emitLValue(e))
                                  : bind(cgf, ov, cgf.emitAnyExpr(e));
  }

  /// Bind \p ov to an already-emitted l-value.
  static OpaqueValueMappingData
  bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
    assert(shouldBindAsLValue(ov));
    cgf.opaqueLValues.insert({ov, lv});
    return OpaqueValueMappingData(ov, /*boundLValue=*/true);
  }

  /// Bind \p ov to an already-emitted r-value.
  static OpaqueValueMappingData
  bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
    assert(!shouldBindAsLValue(ov));
    cgf.opaqueRValues.insert({ov, rv});
    OpaqueValueMappingData result(ov, /*boundLValue=*/false);
    // Work around an extremely aggressive peephole optimization in
    // EmitScalarConversion which assumes that all other uses of a
    // value are extant.
    assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
    return result;
  }

  bool isValid() const { return opaqueValue != nullptr; }
  void clear() { opaqueValue = nullptr; }

  /// Remove the mapping installed by bind().
  void unbind(CIRGenFunction &cgf) {
    assert(opaqueValue && "no data to unbind!");
    if (!boundLValue) {
      cgf.opaqueRValues.erase(opaqueValue);
      assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
    } else {
      cgf.opaqueLValues.erase(opaqueValue);
    }
  }
};
/// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
class OpaqueValueMapping {
  CIRGenFunction &cgf;
  OpaqueValueMappingData data;

public:
  static bool shouldBindAsLValue(const Expr *expr) {
    return OpaqueValueMappingData::shouldBindAsLValue(expr);
  }

  /// Build the opaque value mapping for the given conditional
  /// operator if it's the GNU ?: extension. This is a common
  /// enough pattern that the convenience operator is really
  /// helpful.
  ///
  OpaqueValueMapping(CIRGenFunction &cgf,
                     const AbstractConditionalOperator *op)
      : cgf(cgf) {
    // Only the GNU binary form (`cond ?: rhs`) introduces an opaque value
    // for the shared "common" expression; for a plain ternary, data is
    // simply left empty.
    if (const auto *e = mlir::dyn_cast<BinaryConditionalOperator>(op))
      data = OpaqueValueMappingData::bind(cgf, e->getOpaqueValue(),
                                          e->getCommon());
  }

  /// Build the opaque value mapping for an OpaqueValueExpr whose source
  /// expression is set to the expression the OVE represents.
  OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
      : cgf(cgf) {
    if (!ov)
      return;
    assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
                                  "for OVE with no source expression");
    data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
  }

  OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
                     LValue lvalue)
      : cgf(cgf),
        data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}

  OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
                     RValue rvalue)
      : cgf(cgf),
        data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}

  /// Eagerly remove the mapping before the destructor would.
  void pop() {
    data.unbind(cgf);
    data.clear();
  }

  ~OpaqueValueMapping() {
    if (data.isValid())
      data.unbind(cgf);
  }
};
private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
mlir::Location loc, clang::CharUnits alignment,
bool isParam = false);
public:
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
private:
// Track current variable initialization (if there's one)
const clang::VarDecl *currVarDecl = nullptr;
class VarDeclContext {
  CIRGenFunction &p;
  const clang::VarDecl *oldVal = nullptr;

public:
  /// RAII guard: make \p value the current VarDecl being initialized and
  /// restore the previous one (possibly null) on destruction.
  VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
    oldVal = p.currVarDecl;
    p.currVarDecl = value;
  }

  /// Can be used to restore the state early, before the dtor
  /// is run.
  void restore() { p.currVarDecl = oldVal; }
  ~VarDeclContext() { restore(); }
};
public:
/// Use to track source locations across nested visitor traversals.
/// Always use a `SourceLocRAIIObject` to change currSrcLoc.
std::optional<mlir::Location> currSrcLoc;
class SourceLocRAIIObject {
  CIRGenFunction &cgf;
  std::optional<mlir::Location> oldLoc;

public:
  /// RAII guard: install \p value as the current source location and restore
  /// the previous (possibly empty) location on destruction.
  SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
      : cgf(cgf), oldLoc(cgf.currSrcLoc) {
    cgf.currSrcLoc = value;
  }

  /// Can be used to restore the state early, before the dtor
  /// is run.
  void restore() { cgf.currSrcLoc = oldLoc; }
  ~SourceLocRAIIObject() { restore(); }
};
using SymTableScopeTy =
llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
/// Hold counters for incrementally naming temporaries
unsigned counterRefTmp = 0;
unsigned counterAggTmp = 0;
std::string getCounterRefTmpAsString();
std::string getCounterAggTmpAsString();
/// Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Location getLoc(clang::SourceLocation srcLoc);
mlir::Location getLoc(clang::SourceRange srcLoc);
mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
/// True if an insertion point is defined. If not, this indicates that the
/// current code being emitted is unreachable.
/// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
/// since we don't yet force null insertion point to designate behavior (like
/// LLVM's codegen does) and we probably shouldn't.
bool haveInsertPoint() const {
return builder.getInsertionBlock() != nullptr;
}
// Wrapper for function prototype sources. Wraps either a FunctionProtoType or
// an ObjCMethodDecl.
struct PrototypeWrapper {
llvm::PointerUnion<const clang::FunctionProtoType *,
const clang::ObjCMethodDecl *>
p;
PrototypeWrapper(const clang::FunctionProtoType *ft) : p(ft) {}
PrototypeWrapper(const clang::ObjCMethodDecl *md) : p(md) {}
};
bool isLValueSuitableForInlineAtomic(LValue lv);
/// An abstract representation of regular/ObjC call/message targets.
class AbstractCallee {
  /// The function declaration of the callee.
  [[maybe_unused]] const clang::Decl *calleeDecl;

public:
  AbstractCallee() : calleeDecl(nullptr) {}
  AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}

  bool hasFunctionDecl() const {
    return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
  }

  /// Number of formal parameters; the callee is either a FunctionDecl or an
  /// ObjCMethodDecl.
  unsigned getNumParams() const {
    if (const auto *md = llvm::dyn_cast<clang::ObjCMethodDecl>(calleeDecl))
      return md->param_size();
    return llvm::cast<clang::FunctionDecl>(calleeDecl)->getNumParams();
  }

  const clang::ParmVarDecl *getParamDecl(unsigned I) const {
    if (const auto *md = llvm::dyn_cast<clang::ObjCMethodDecl>(calleeDecl))
      return *(md->param_begin() + I);
    return llvm::cast<clang::FunctionDecl>(calleeDecl)->getParamDecl(I);
  }
};
struct VlaSizePair {
  // Runtime element count (an emitted mlir::Value, not a compile-time size).
  mlir::Value numElts;
  // The element type the count is measured in (see getVLASize below).
  QualType type;
  VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
};
/// Return the number of elements for a single dimension
/// for the given array type.
VlaSizePair getVLAElements1D(const VariableArrayType *vla);
/// Returns an MLIR::Value+QualType pair that corresponds to the size,
/// in non-variably-sized elements, of a variable length array type,
/// plus that largest non-variably-sized element type. Assumes that
/// the type has already been emitted with emitVariablyModifiedType.
VlaSizePair getVLASize(const VariableArrayType *type);
VlaSizePair getVLASize(QualType type);
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy);
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
}
void finishFunction(SourceLocation endLoc);
/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool isTrivialInitializer(const Expr *init);
/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds return true and set
/// the boolean result in Result.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
bool allowLabels = false);
bool constantFoldsToSimpleInteger(const clang::Expr *cond,
llvm::APSInt &resultInt,
bool allowLabels = false);
/// Return true if the statement contains a label in it. If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
class ConstantEmission {
  // Cannot use mlir::TypedAttr directly here because of bit availability.
  llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;

  ConstantEmission(mlir::TypedAttr c, bool isReference)
      : valueAndIsReference(c, isReference) {}

public:
  ConstantEmission() {}

  /// Wrap a constant that must be accessed by reference.
  static ConstantEmission forReference(mlir::TypedAttr c) {
    return ConstantEmission(c, /*isReference=*/true);
  }

  /// Wrap a constant usable directly as a value.
  static ConstantEmission forValue(mlir::TypedAttr c) {
    return ConstantEmission(c, /*isReference=*/false);
  }

  explicit operator bool() const {
    return valueAndIsReference.getOpaqueValue() != nullptr;
  }

  bool isReference() const { return valueAndIsReference.getInt(); }

  LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const {
    assert(isReference());
    cgf.cgm.errorNYI(refExpr->getSourceRange(),
                     "ConstantEmission::getReferenceLValue");
    return {};
  }

  mlir::TypedAttr getValue() const {
    assert(!isReference());
    return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
  }
};
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
ConstantEmission tryEmitAsConstant(const MemberExpr *me);
/// Bookkeeping produced while emitting a local automatic variable: where it
/// lives and which special emission paths (global, offload, NRVO) applied.
struct AutoVarEmission {
  const clang::VarDecl *variable;
  /// The address of the alloca for languages with explicit address space
  /// (e.g. OpenCL) or alloca casted to generic pointer for address space
  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
  /// as a global constant.
  Address addr;
  /// True if the variable is of aggregate type and has a constant
  /// initializer.
  bool isConstantAggregate = false;
  /// True if the variable is a __block variable that is captured by an
  /// escaping block.
  bool isEscapingByRef = false;
  /// True if the variable was emitted as an offload recipe, and thus doesn't
  /// have the same sort of alloca initialization.
  bool emittedAsOffload = false;
  // Flag used to indicate whether NRVO has been applied to this variable
  // (see the nrvoFlags map on CIRGenFunction). Null when NRVO doesn't apply.
  mlir::Value nrvoFlag{};
  // Tag type so invalid() can select the sentinel constructor.
  struct Invalid {};
  AutoVarEmission(Invalid) : variable(nullptr), addr(Address::invalid()) {}
  AutoVarEmission(const clang::VarDecl &variable)
      : variable(&variable), addr(Address::invalid()) {}
  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
  bool wasEmittedAsGlobal() const { return !addr.isValid(); }
  bool wasEmittedAsOffloadClause() const { return emittedAsOffload; }
  /// Returns the raw, allocated address, which is not necessarily
  /// the address of the object itself. It is casted to default
  /// address space for address space agnostic languages.
  Address getAllocatedAddress() const { return addr; }
  // Changes the stored address for the emission. This function should only
  // be used in extreme cases, and isn't required to model normal AST
  // initialization/variables.
  void setAllocatedAddress(Address a) { addr = a; }
  /// Returns the address of the object within this declaration.
  /// Note that this does not chase the forwarding pointer for
  /// __block decls.
  Address getObjectAddress(CIRGenFunction &cgf) const {
    if (!isEscapingByRef)
      return addr;
    assert(!cir::MissingFeatures::opAllocaEscapeByReference());
    return Address::invalid();
  }
};
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
/// CIRGen: this mostly tracks state for figuring out the proper scope
/// information, no actual branches are emitted.
JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
return JumpDest(target, ehStack.getInnermostNormalCleanup(),
nextCleanupDestIndex++);
}
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
/// is emitted at the end of the function, once all block destinations have
/// been resolved.
mlir::Block *indirectGotoBlock = nullptr;
void resolveBlockAddresses();
void finishIndirectBranch();
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
cir::GlobalOp gv,
cir::GetGlobalOp gvAddr);
/// Enter the cleanups necessary to complete the given phase of destruction
/// for a destructor. The end result should call destructors on members and
/// base classes in reverse order of their construction.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type);
/// Determines whether an EH cleanup is required to destroy a type
/// with the given destruction kind.
/// TODO(cir): could be shared with Clang LLVM codegen
bool needsEHCleanup(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none:
    return false;
  case QualType::DK_objc_strong_lifetime:
    // ARC strong cleanups are EH-relevant only when the frontend models
    // exceptions through ARC releases.
    return getLangOpts().Exceptions &&
           cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
  case QualType::DK_cxx_destructor:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    return getLangOpts().Exceptions;
  }
  llvm_unreachable("bad destruction kind");
}
/// Map a destruction kind to the cleanup kind used when pushing cleanups.
CleanupKind getCleanupKind(QualType::DestructionKind kind) {
  if (needsEHCleanup(kind))
    return NormalAndEHCleanup;
  return NormalCleanup;
}
void pushStackRestore(CleanupKind kind, Address spMem);
/// Set the address of a local variable.
/// Asserts that \p vd was not previously registered.
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
  assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
  localDeclMap.insert({vd, addr});
  // Mirror the declaration into the scoped symbol table unless it is
  // already present there.
  if (!symbolTable.count(vd))
    symbolTable.insert(vd, addr.getPointer());
}
// Replaces the address of the local variable, if it exists. Else does the
// same thing as setAddrOfLocalVar.
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
localDeclMap.insert_or_assign(vd, addr);
}
// A class to allow reverting changes to a var-decl's registration to the
// localDeclMap. This is used in cases where things are being inserted into
// the variable list but don't follow normal lookup/search rules, like in
// OpenACC recipe generation.
class DeclMapRevertingRAII {
  CIRGenFunction &cgf;
  const VarDecl *vd;
  bool shouldDelete = false;
  Address oldAddr = Address::invalid();

public:
  /// Snapshot the current localDeclMap entry for \p vd (if any) so the
  /// destructor can restore it.
  DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
      : cgf(cgf), vd(vd) {
    auto entry = cgf.localDeclMap.find(vd);
    shouldDelete = entry == cgf.localDeclMap.end();
    if (!shouldDelete)
      oldAddr = entry->second;
  }

  ~DeclMapRevertingRAII() {
    if (shouldDelete)
      cgf.localDeclMap.erase(vd);
    else
      cgf.localDeclMap.insert_or_assign(vd, oldAddr);
  }
};
bool shouldNullCheckClassCastValue(const CastExpr *ce);
RValue convertTempToRValue(Address addr, clang::QualType type,
clang::SourceLocation loc);
static bool
isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor);
struct VPtr {
clang::BaseSubobject base;
const clang::CXXRecordDecl *nearestVBase;
clang::CharUnits offsetFromNearestVBase;
const clang::CXXRecordDecl *vtableClass;
};
using VisitedVirtualBasesSetTy =
llvm::SmallPtrSet<const clang::CXXRecordDecl *, 4>;
using VPtrsVector = llvm::SmallVector<VPtr, 4>;
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass);
void getVTablePointers(clang::BaseSubobject base,
const clang::CXXRecordDecl *nearestVBase,
clang::CharUnits offsetFromNearestVBase,
bool baseIsNonVirtualPrimaryBase,
const clang::CXXRecordDecl *vtableClass,
VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
/// Return the Value of the vtable pointer member pointed to by thisAddr.
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
const clang::CXXRecordDecl *vtableClass);
/// Returns whether we should perform a type checked load when loading a
/// virtual function for virtual calls to members of RD. This is generally
/// true when both vcall CFI and whole-program-vtables are enabled.
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd);
/// Source location information about the default argument or member
/// initializer expression we're evaluating, if any.
clang::CurrentSourceLocExprScope curSourceLocExprScope;
using SourceLocExprScopeGuard =
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard;
/// A scope within which we are constructing the fields of an object which
/// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
/// we need to evaluate the CXXDefaultInitExpr within the evaluation.
class FieldConstructionScope {
public:
  /// Stash \p thisAddr as the object under construction so that nested
  /// CXXDefaultInitExpr evaluation can find its 'this'; the previous
  /// address is restored on destruction.
  FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
      : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
    cgf.cxxDefaultInitExprThis = thisAddr;
  }
  ~FieldConstructionScope() {
    cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
  }

private:
  CIRGenFunction &cgf;
  Address oldCXXDefaultInitExprThis;
};
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
/// is overridden to be the object under construction.
class CXXDefaultInitExprScope {
public:
  /// Redirect 'this' to the object under construction (the address stashed
  /// in cxxDefaultInitExprThis) for the duration of the scope, and enter
  /// the corresponding source-location scope for \p e.
  CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
      : cgf{cgf}, oldCXXThisValue(cgf.cxxThisValue),
        oldCXXThisAlignment(cgf.cxxThisAlignment),
        sourceLocScope(e, cgf.curSourceLocExprScope) {
    cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
    cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
  }
  ~CXXDefaultInitExprScope() {
    cgf.cxxThisValue = oldCXXThisValue;
    cgf.cxxThisAlignment = oldCXXThisAlignment;
  }

public:
  CIRGenFunction &cgf;
  mlir::Value oldCXXThisValue;
  clang::CharUnits oldCXXThisAlignment;
  SourceLocExprScopeGuard sourceLocScope;
};
/// The scope of a CXXDefaultArgExpr: enters the source-location scope for
/// the default argument expression \p e.
struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
  // Fix: parameter was named `cfg`, a transposition of the `cgf` naming
  // convention used throughout this class.
  CXXDefaultArgExprScope(CIRGenFunction &cgf, const CXXDefaultArgExpr *e)
      : SourceLocExprScopeGuard(e, cgf.curSourceLocExprScope) {}
};
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
/// Construct an address with the natural alignment of T. If a pointer to T
/// is expected to be signed, the pointer passed to this function must have
/// been signed, and the returned Address will have the pointer authentication
/// information needed to authenticate the signed pointer.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t,
                                     CharUnits alignment,
                                     bool forPointeeType = false,
                                     LValueBaseInfo *baseInfo = nullptr) {
  // NOTE(review): forPointeeType is currently unused here; classic codegen
  // forwards it to getNaturalTypeAlignment — confirm whether the CIR
  // overload is expected to take it eventually.
  const CharUnits effectiveAlign =
      alignment.isZero() ? cgm.getNaturalTypeAlignment(t, baseInfo)
                         : alignment;
  return Address(ptr, convertTypeForMem(t), effectiveAlign);
}
Address getAddressOfBaseClass(
Address value, const CXXRecordDecl *derived,
llvm::iterator_range<CastExpr::path_const_iterator> path,
bool nullCheckValue, SourceLocation loc);
Address getAddressOfDerivedClass(
mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
llvm::iterator_range<CastExpr::path_const_iterator> path,
bool nullCheckValue);
/// Return the VTT parameter that should be passed to a base
/// constructor/destructor with virtual bases.
/// FIXME: VTTs are Itanium ABI-specific, so the definition should move
/// to ItaniumCXXABI.cpp together with all the references to VTT.
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
bool delegating);
LValue makeAddrLValue(Address addr, QualType ty,
AlignmentSource source = AlignmentSource::Type) {
return makeAddrLValue(addr, ty, LValueBaseInfo(source));
}
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo) {
return LValue::makeAddr(addr, ty, baseInfo);
}
void initializeVTablePointers(mlir::Location loc,
const clang::CXXRecordDecl *rd);
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd);
/// Return the address of a local variable.
/// Asserts that \p vd was previously registered via setAddrOfLocalVar.
Address getAddrOfLocalVar(const clang::VarDecl *vd) {
  auto entry = localDeclMap.find(vd);
  assert(entry != localDeclMap.end() &&
         "Invalid argument to getAddrOfLocalVar(), no decl!");
  return entry->second;
}
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field,
mlir::Type fieldType, unsigned index);
/// Given an opaque value expression, return its LValue mapping if it exists,
/// otherwise create one.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
/// Given an opaque value expression, return its RValue mapping if it exists,
/// otherwise create one.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
/// Load the value for 'this'. This function is only valid while generating
/// code for an C++ member function.
/// FIXME(cir): this should return a mlir::Value!
mlir::Value loadCXXThis() {
assert(cxxThisValue && "no 'this' value for this function");
return cxxThisValue;
}
Address loadCXXThisAddress();
/// Load the VTT parameter for base constructors/destructors that have
/// virtual bases.
/// FIXME: Every place that calls LoadCXXVTT is something that needs to
/// be abstracted properly.
mlir::Value loadCXXVTT() {
assert(cxxStructorImplicitParamValue && "no VTT value for this function");
return cxxStructorImplicitParamValue;
}
/// Convert the given pointer to a complete class to the given direct base.
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc,
Address value,
const CXXRecordDecl *derived,
const CXXRecordDecl *base,
bool baseIsVirtual);
/// Determine whether a return value slot may overlap some other object.
AggValueSlot::Overlap_t getOverlapForReturnValue() {
// FIXME: Assuming no overlap here breaks guaranteed copy elision for base
// class subobjects. These cases may need to be revisited depending on the
// resolution of the relevant core issue.
return AggValueSlot::DoesNotOverlap;
}
/// Determine whether a base class initialization may overlap some other
/// object.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd,
const CXXRecordDecl *baseRD,
bool isVirtual);
/// Get an appropriate 'undef' rvalue for the given type.
/// TODO: What's the equivalent for MLIR? Currently we're only using this for
/// void types so it just returns RValue::get(nullptr), but it'll need to be
/// addressed later.
RValue getUndefRValue(clang::QualType ty);
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
cir::FuncType funcType);
clang::QualType buildFunctionArgList(clang::GlobalDecl gd,
FunctionArgList &args);
/// Emit the function prologue: declare function arguments in the symbol
/// table.
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
const FunctionDecl *fd, SourceLocation bodyBeginLoc);
/// Emit code for the start of a function.
/// \param loc The location to be associated with the function.
/// \param startLoc The location of the function body.
void startFunction(clang::GlobalDecl gd, clang::QualType returnType,
cir::FuncOp fn, cir::FuncType funcType,
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
/// Returns true if the given type is a record (aggregate) type that contains
/// a volatile member; non-record types trivially have none.
bool hasVolatileMember(QualType t) {
const auto *record = t->getAsRecordDecl();
return record && record->hasVolatileMember();
}
void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp);
void populateEHCatchRegions(EHScopeStack::stable_iterator scope,
cir::TryOp tryOp);
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
bool isCatchOrCleanupRequired();
void populateCatchHandlersIfRequired(cir::TryOp tryOp);
/// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
void popCleanupBlock();
/// Push a cleanup to be run at the end of the current full-expression. Safe
/// against the possibility that we're currently inside a
/// conditionally-evaluated expression.
template <class T, class... As>
void pushFullExprCleanup(CleanupKind kind, As... a) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
if (!isInConditionalBranch())
return ehStack.pushCleanup<T>(kind, a...);
// NOTE(review): the conditional-branch path is not implemented yet;
// classic codegen saves the arguments for later reconstruction here.
cgm.errorNYI("pushFullExprCleanup in conditional branch");
}
/// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
// Cleanup-stack depth at scope entry (everything pushed above it belongs to
// this scope) and the previously active scope's depth, restored on exit.
EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
protected:
// True until cleanups have been emitted (either by the destructor or by an
// explicit forceCleanup() call).
bool performCleanup;
// Saved value of cgf.didCallStackSave, restored when cleanups run.
bool oldDidCallStackSave;
private:
RunCleanupsScope(const RunCleanupsScope &) = delete;
void operator=(const RunCleanupsScope &) = delete;
protected:
CIRGenFunction &cgf;
public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
cleanupStackDepth = cgf.ehStack.stable_begin();
oldDidCallStackSave = cgf.didCallStackSave;
cgf.didCallStackSave = false;
oldCleanupStackDepth = cgf.currentCleanupStackDepth;
cgf.currentCleanupStackDepth = cleanupStackDepth;
}
/// Exit this cleanup scope, emitting any accumulated cleanups.
~RunCleanupsScope() {
if (performCleanup)
forceCleanup();
}
/// Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void forceCleanup() {
assert(performCleanup && "Already forced cleanup");
{
// Emit the cleanups without permanently moving the builder's insertion
// point; the guard restores it at the end of this block.
mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
cgf.didCallStackSave = oldDidCallStackSave;
cgf.popCleanupBlocks(cleanupStackDepth);
performCleanup = false;
cgf.currentCleanupStackDepth = oldCleanupStackDepth;
}
}
};
// Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
EHScopeStack::stable_iterator currentCleanupStackDepth = ehStack.stable_end();
public:
/// Represents a scope, including function bodies, compound statements, and
/// the substatements of if/while/do/for/switch/try statements. This class
/// handles any automatic cleanup, along with the return value.
struct LexicalScope : public RunCleanupsScope {
private:
// Block containing cleanup code for things initialized in this
// lexical context (scope).
mlir::Block *cleanupBlock = nullptr;
// Points to the scope entry block. This is useful, for instance, for
// helping to insert allocas before finalizing any recursive CodeGen from
// switches.
mlir::Block *entryBlock;
LexicalScope *parentScope = nullptr;
// Holds the actual value for ScopeKind::Try
cir::TryOp tryOp = nullptr;
// On a coroutine body, the OnFallthrough sub stmt holds the handler
// (CoreturnStmt) for control flow falling off the body. Keep track
// of emitted co_return in this scope and allow OnFallthrough to be
// skipped.
bool hasCoreturnStmt = false;
// Only Regular is used at the moment. Support for other kinds will be
// added as the relevant statements/expressions are upstreamed.
enum Kind {
Regular, // cir.if, cir.scope, if_regions
Ternary, // cir.ternary
Switch, // cir.switch
Try, // cir.try
GlobalInit // cir.global initialization code
};
Kind scopeKind = Kind::Regular;
// The scope return value.
mlir::Value retVal = nullptr;
mlir::Location beginLoc;
mlir::Location endLoc;
public:
// NOTE(review): per the constructor below, depth is only ever 0 or 1
// (it is incremented once when a parent exists, never inherited from the
// parent) — confirm whether nested scopes should be parent->depth + 1.
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
: RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
beginLoc(loc), endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
cgf.curLexScope = this;
if (parentScope)
++depth;
// A two-element fused location carries the scope's begin/end range.
if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
assert(fusedLoc.getLocations().size() == 2 && "too many locations");
beginLoc = fusedLoc.getLocations()[0];
endLoc = fusedLoc.getLocations()[1];
}
}
void setRetVal(mlir::Value v) { retVal = v; }
void cleanup();
// Re-activate the parent scope; called from the destructor after cleanup.
void restore() { cgf.curLexScope = parentScope; }
~LexicalScope() {
assert(!cir::MissingFeatures::generateDebugInfo());
cleanup();
restore();
}
// ---
// Coroutine tracking
// ---
bool hasCoreturn() const { return hasCoreturnStmt; }
void setCoreturn() { hasCoreturnStmt = true; }
// ---
// Kind
// ---
bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
bool isRegular() { return scopeKind == Kind::Regular; }
bool isSwitch() { return scopeKind == Kind::Switch; }
bool isTernary() { return scopeKind == Kind::Ternary; }
bool isTry() { return scopeKind == Kind::Try; }
cir::TryOp getClosestTryParent();
void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
void setAsTry(cir::TryOp op) {
scopeKind = Kind::Try;
tryOp = op;
}
// Lazy create cleanup block or return what's available.
mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
if (cleanupBlock)
return cleanupBlock;
cleanupBlock = createCleanupBlock(builder);
return cleanupBlock;
}
cir::TryOp getTry() {
assert(isTry());
return tryOp;
}
// Returns the cleanup block if one has already been created, else null.
mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
return cleanupBlock;
}
mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
// Create the cleanup block but don't hook it up just yet.
mlir::OpBuilder::InsertionGuard guard(builder);
mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
: &cgf.curFn->getRegion(0);
cleanupBlock = builder.createBlock(r);
return cleanupBlock;
}
// ---
// Return handling.
// ---
private:
// On switches we need one return block per region, since cases don't
// have their own scopes but are distinct regions nonetheless.
// TODO: This implementation should change once we have support for early
// exits in MLIR structured control flow (llvm-project#161575)
llvm::SmallVector<mlir::Block *> retBlocks;
llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
std::optional<unsigned> normalRetBlockIndex;
// There's usually only one ret block per scope, but this needs to be
// get or create because of potential unreachable return statements, note
// that for those, all source location maps to the first one found.
mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
// Multiple ret blocks are only legal when emitting a switch, where each
// case region gets its own; everything else gets exactly one.
assert((isa_and_nonnull<cir::CaseOp>(
cgf.builder.getBlock()->getParentOp()) ||
retBlocks.size() == 0) &&
"only switches can hold more than one ret block");
// Create the return block but don't hook it up just yet.
mlir::OpBuilder::InsertionGuard guard(cgf.builder);
auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
retBlocks.push_back(b);
updateRetLoc(b, loc);
return b;
}
cir::ReturnOp emitReturn(mlir::Location loc);
void emitImplicitReturn();
public:
llvm::ArrayRef<mlir::Block *> getRetBlocks() { return retBlocks; }
mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
void updateRetLoc(mlir::Block *b, mlir::Location loc) {
retLocs.insert_or_assign(b, loc);
}
mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
// Check if we're inside a case region
if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
cgf.builder.getBlock()->getParentOp())) {
auto iter = retBlockInCaseIndex.find(caseOp);
if (iter != retBlockInCaseIndex.end()) {
// Reuse existing return block
mlir::Block *ret = retBlocks[iter->second];
updateRetLoc(ret, loc);
return ret;
}
// Create new return block
mlir::Block *ret = createRetBlock(cgf, loc);
retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
return ret;
}
// Outside a switch: reuse the single normal return block if present.
if (normalRetBlockIndex) {
mlir::Block *ret = retBlocks[*normalRetBlockIndex];
updateRetLoc(ret, loc);
return ret;
}
mlir::Block *ret = createRetBlock(cgf, loc);
normalRetBlockIndex = retBlocks.size() - 1;
return ret;
}
mlir::Block *getEntryBlock() { return entryBlock; }
};
// The innermost active LexicalScope; maintained by the LexicalScope
// constructor/restore() pair above.
LexicalScope *curLexScope = nullptr;
typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
static Destroyer destroyCXXObject;
void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
QualType type);
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
/// ----------------------
/// CIR emit functions
/// ----------------------
public:
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
SmallVectorImpl<mlir::Value> &ops,
clang::SVETypeFlags typeFlags);
std::optional<mlir::Value>
emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
ReturnValueSlot returnValue,
llvm::Triple::ArchType arch);
std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
const CallExpr *expr);
std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
const CallExpr *expr);
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
SourceLocation loc,
SourceLocation assumptionLoc,
int64_t alignment,
mlir::Value offsetValue = nullptr);
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
SourceLocation assumptionLoc,
int64_t alignment,
mlir::Value offsetValue = nullptr);
private:
void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
clang::CharUnits alignment);
CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
public:
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field,
llvm::StringRef fieldName,
unsigned fieldIndex);
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
mlir::Location loc, clang::CharUnits alignment,
bool insertIntoFnEntryBlock,
mlir::Value arraySize = nullptr);
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
mlir::Location loc, clang::CharUnits alignment,
mlir::OpBuilder::InsertPoint ip,
mlir::Value arraySize = nullptr);
void emitAggregateStore(mlir::Value value, Address dest);
void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
enum ExprValueKind { EVK_RValue, EVK_NonRValue };
LValue emitAggExprToLValue(const Expr *e);
/// Emit an aggregate copy.
///
/// \param isVolatile \c true iff either the source or the destination is
/// volatile.
/// \param MayOverlap Whether the tail padding of the destination might be
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
AggValueSlot::Overlap_t mayOverlap,
bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
AggValueSlot aggSlot = AggValueSlot::ignored(),
bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
bool isInitializer);
/// Similarly to emitAnyExpr(), however, the result will always be accessible
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
void emitAnyExprToExn(const Expr *e, Address addr);
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
QualType elementType, CharUnits elementAlign,
Destroyer *destroyer);
mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e);
Address emitArrayToPointerDecay(const Expr *e,
LValueBaseInfo *baseInfo = nullptr);
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
RValue emitAtomicExpr(AtomicExpr *e);
void emitAtomicInit(Expr *init, LValue dest);
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
bool isVolatile, bool isInit);
void emitAtomicExprWithMemOrder(
const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
mlir::OpBuilder::InsertPoint ip = {});
/// Emit code and set up symbol table for a variable declaration with auto,
/// register, or no storage class specifier. These turn into simple stack
/// objects, globals depending on target.
void emitAutoVarDecl(const clang::VarDecl &d);
void emitAutoVarCleanups(const AutoVarEmission &emission);
/// Emit the initializer for an allocated variable. If this call is not
/// associated with the call to emitAutoVarAlloca (as the address of the
/// emission is not directly an alloca), the allocatedSeparately parameter can
/// be used to suppress the assertions. However, this should only be used in
/// extreme cases, as it doesn't properly reflect the language/AST.
void emitAutoVarInit(const AutoVarEmission &emission);
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
clang::QualType::DestructionKind dtorKind);
void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
const clang::CallExpr *e, ReturnValueSlot returnValue);
/// Returns a Value corresponding to the size of the given expression by
/// emitting a `cir.objsize` operation.
///
/// \param e The expression whose object size to compute
/// \param type Determines the semantics of the object size computation.
/// The type parameter is a 2-bit value where:
/// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
/// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
/// \param resType The result type for the size value
/// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
/// call `cir.objsize` on this value rather than emitting e.
/// \param isDynamic If true, allows runtime evaluation via dynamic mode
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
cir::IntType resType, mlir::Value emittedE,
bool isDynamic);
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
unsigned type,
cir::IntType resType,
mlir::Value emittedE,
bool isDynamic);
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
void instantiateIndirectGotoBlock();
RValue emitCall(const CIRGenFunctionInfo &funcInfo,
const CIRGenCallee &callee, ReturnValueSlot returnValue,
const CallArgList &args, cir::CIRCallOpInterface *callOp,
mlir::Location loc);
/// Convenience overload that forwards to the location-taking emitCall using
/// the current source location; currSrcLoc must already have been set.
RValue emitCall(const CIRGenFunctionInfo &funcInfo,
const CIRGenCallee &callee, ReturnValueSlot returnValue,
const CallArgList &args,
cir::CIRCallOpInterface *callOrTryCall = nullptr) {
assert(currSrcLoc && "source location must have been set");
return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
*currSrcLoc);
}
RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
const clang::CallExpr *e, ReturnValueSlot returnValue);
void emitCallArg(CallArgList &args, const clang::Expr *e,
clang::QualType argType);
void emitCallArgs(
CallArgList &args, PrototypeWrapper prototype,
llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
RValue emitCallExpr(const clang::CallExpr *e,
ReturnValueSlot returnValue = ReturnValueSlot());
LValue emitCallExprLValue(const clang::CallExpr *e);
CIRGenCallee emitCallee(const clang::Expr *e);
template <typename T>
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
mlir::ArrayAttr value,
cir::CaseOpKind kind,
bool buildingTopLevelCase);
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
mlir::Type condType,
bool buildingTopLevelCase);
LValue emitCastLValue(const CastExpr *e);
/// Emits an argument for a call to a `__builtin_assume`. If the builtin
/// sanitizer is enabled, a runtime check is also emitted.
mlir::Value emitCheckedArgForAssume(const Expr *e);
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
QualType dstTy, SourceLocation loc);
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
void emitConstructorBody(FunctionArgList &args);
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
mlir::Value coroframeAddr);
RValue emitCoroutineFrame();
void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
void emitDestructorBody(FunctionArgList &args);
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
AggValueSlot dest);
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
const clang::ArrayType *arrayType,
Address arrayBegin, const CXXConstructExpr *e,
bool newPointerIsChecked,
bool zeroInitialize = false);
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
mlir::Value numElements, Address arrayBase,
const CXXConstructExpr *e,
bool newPointerIsChecked,
bool zeroInitialize);
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type, bool forVirtualBase,
bool delegating, AggValueSlot thisAVS,
const clang::CXXConstructExpr *e);
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type, bool forVirtualBase,
bool delegating, Address thisAddr,
CallArgList &args, clang::SourceLocation loc);
void emitCXXDeleteExpr(const CXXDeleteExpr *e);
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy);
RValue emitCXXDestructorCall(GlobalDecl dtor, const CIRGenCallee &callee,
mlir::Value thisVal, QualType thisTy,
mlir::Value implicitParam,
QualType implicitParamTy, const CallExpr *e);
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
llvm::ArrayRef<const Attr *> attrs);
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e,
ReturnValueSlot returnValue);
Address emitCXXMemberDataPointerAddress(
const Expr *e, Address base, mlir::Value memberPtr,
const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
RValue emitCXXMemberOrOperatorCall(
const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
ReturnValueSlot returnValue, mlir::Value thisPtr,
mlir::Value implicitParam, clang::QualType implicitParamTy,
const clang::CallExpr *ce, CallArgList *rtlArgs);
RValue emitCXXMemberOrOperatorMemberCallExpr(
const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
ReturnValueSlot returnValue, bool hasQualifier,
clang::NestedNameSpecifier qualifier, bool isArrow,
const clang::Expr *base);
mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
mlir::Type elementTy, Address beginPtr,
mlir::Value numElements,
mlir::Value allocSizeWithoutCookie);
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
const CXXMethodDecl *md,
ReturnValueSlot returnValue);
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr);
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type,
const CallExpr *callExpr,
OverloadedOperatorKind op);
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
Address ptr);
void emitCXXThrowExpr(const CXXThrowExpr *e);
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
bool isFnTryBlock = false);
void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
// It's important not to confuse this and emitDelegateCXXConstructorCall.
// Delegating constructors are the C++11 feature. The constructor delegate
// optimization is used to reduce duplication in the base and complete
// constructors where they are substantially the same.
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor,
const FunctionArgList &args);
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
QualType deleteTy);
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param d the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p d is a __block variable whose address is
/// potentially changed by the initializer
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
LValue lvalue, bool capturedByInit = false);
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
void emitImplicitAssignmentOperatorBody(FunctionArgList &args);
void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
clang::Expr *init);
LValue emitPredefinedLValue(const PredefinedExpr *e);
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
llvm::ArrayRef<mlir::Value> args = {});
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
/// Emit the computation of the specified expression of scalar type.
mlir::Value emitScalarExpr(const clang::Expr *e,
bool ignoreResultAssign = false);
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
cir::UnaryOpKind kind, bool isPre);
/// Build a debug stoppoint if we are emitting debug info.
void emitStopPoint(const Stmt *s);
// Build CIR for a statement. useCurrentScope should be true if no
// new scopes need be created when finding a compound statement.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
llvm::ArrayRef<const Attr *> attrs = {});
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
bool useCurrentScope);
mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
CallArgList &callArgs);
RValue emitCoawaitExpr(const CoawaitExpr &e,
AggValueSlot aggSlot = AggValueSlot::ignored(),
bool ignoreResult = false);
/// Emit the computation of the specified expression of complex type,
/// returning the result.
mlir::Value emitComplexExpr(const Expr *e);
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
cir::UnaryOpKind op, bool isPre);
LValue emitComplexAssignmentLValue(const BinaryOperator *e);
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
mlir::Value &result);
mlir::LogicalResult
emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
AggValueSlot slot = AggValueSlot::ignored());
mlir::LogicalResult
emitCompoundStmtWithoutScope(const clang::CompoundStmt &s,
Address *lastValue = nullptr,
AggValueSlot slot = AggValueSlot::ignored());
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
mlir::Type condType,
bool buildingTopLevelCase);
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType,
const FunctionArgList &args,
clang::SourceLocation loc);
/// We are performing a delegate call; that is, the current function is
/// delegating to another one. Produce a r-value suitable for passing the
/// given parameter.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
clang::SourceLocation loc);
/// Emit an `if` on a boolean condition to the specified blocks.
/// FIXME: Based on the condition, this might try to simplify the codegen of
/// the conditional based on the branch.
/// In the future, we may apply code generation simplifications here,
/// similar to those used in classic LLVM codegen
/// See `EmitBranchOnBoolExpr` for inspiration.
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
const clang::Stmt *thenS,
const clang::Stmt *elseS);
cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
BuilderCallbackRef thenBuilder,
mlir::Location thenLoc,
BuilderCallbackRef elseBuilder,
std::optional<mlir::Location> elseLoc = {});
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
void populateCatchHandlers(cir::TryOp tryOp);
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
/// Emit code to compute the specified expression,
/// ignoring the result.
void emitIgnoredExpr(const clang::Expr *e);
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
/// Load a complex number from the specified l-value.
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
RValue emitLoadOfExtVectorElementLValue(LValue lv);
/// Given an expression that represents a value lvalue, this method emits
/// the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
LValueBaseInfo *pointeeBaseInfo);
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
QualType refTy, AlignmentSource source);
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation. The l-value must be a simple
/// l-value.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
SourceLocation loc, LValueBaseInfo baseInfo);
/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue emitLValue(const clang::Expr *e);
LValue emitLValueForBitField(LValue base, const FieldDecl *field);
LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
LValue emitLValueForLambdaField(const FieldDecl *field);
LValue emitLValueForLambdaField(const FieldDecl *field,
mlir::Value thisValue);
/// Like emitLValueForField, except that if the field is a reference, this
/// will return the address of the reference and not the address of the value
/// stored in the reference.
LValue emitLValueForFieldInitialization(LValue base,
const clang::FieldDecl *field,
llvm::StringRef fieldName);
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
LValue emitMemberExpr(const MemberExpr *e);
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
/// Given an expression with a pointer type, emit the value and compute our
/// best estimate of the alignment of the pointee.
///
/// One reasonable way to use this information is when there's a language
/// guarantee that the pointer must be aligned to some stricter value, and
/// we're simply trying to ensure that sufficiently obvious uses of under-
/// aligned objects don't get miscompiled; for example, a placement new
/// into the address of a local variable. In such a case, it's quite
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
Address emitPointerWithAlignment(const clang::Expr *expr,
LValueBaseInfo *baseInfo = nullptr);
/// Emits a reference binding to the passed in expression.
RValue emitReferenceBindingToExpr(const Expr *e);
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
RValue emitRotate(const CallExpr *e, bool isRotateLeft);
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are CIR scalar types.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
clang::QualType dstType,
clang::SourceLocation loc);
void emitScalarInit(const clang::Expr *init, mlir::Location loc,
LValue lvalue, bool capturedByInit = false);
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
const Expr *argExpr);
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
bool isInit);
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
clang::QualType ty, LValueBaseInfo baseInfo,
bool isInit = false, bool isNontemporal = false);
void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
/// Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
LValue emitStringLiteralLValue(const StringLiteral *e,
llvm::StringRef name = ".str");
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
bool buildingTopLevelCase);
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
std::optional<mlir::Value>
emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
ReturnValueSlot &returnValue);
/// Given a value and its clang type, returns the value casted to its memory
/// representation.
/// Note: CIR defers most of the special casting to the final lowering passes
/// to conserve the high level information.
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
/// Emit a trap instruction, which is used to abort the program in an abnormal
/// way, usually for debugging purposes.
/// \p createNewBlock indicates whether to create a new block for the IR
/// builder. Since the `cir.trap` operation is a terminator, operations that
/// follow a trap cannot be emitted after `cir.trap` in the same block. To
/// ensure these operations get emitted successfully, you need to create a new
/// dummy block and set the insertion point there before continuing from the
/// trap operation.
void emitTrap(mlir::Location loc, bool createNewBlock);
LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
/// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
/// checking is enabled. Otherwise, just emit an unreachable instruction.
/// \p createNewBlock indicates whether to create a new block for the IR
/// builder. Since the `cir.unreachable` operation is a terminator, operations
/// that follow an unreachable point cannot be emitted after `cir.unreachable`
/// in the same block. To ensure these operations get emitted successfully,
/// you need to create a dummy block and set the insertion point there before
/// continuing from the unreachable point.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
/// This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void emitVarDecl(const clang::VarDecl &d);
void emitVariablyModifiedType(QualType ty);
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
const CallExpr *expr);
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
/// nonnull, if \p lhs is marked _Nonnull.
clang::SourceLocation loc);
/// An RAII-style helper used while emitting conditionally-evaluated
/// expressions. While an instance is active, temporaries created inside
/// the conditional branches may need conditional destruction.
class ConditionalEvaluation {
  CIRGenFunction &cgf;
  /// Insertion point captured before the conditional code is emitted; code
  /// inserted here runs unconditionally, ahead of every branch.
  mlir::OpBuilder::InsertPoint savedIP;

public:
  ConditionalEvaluation(CIRGenFunction &cgf)
      : cgf(cgf), savedIP(cgf.builder.saveInsertionPoint()) {}
  ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
      : cgf(cgf), savedIP(ip) {}

  void beginEvaluation() {
    assert(cgf.outermostConditional != this);
    // Only install ourselves if no outer conditional is already active.
    if (cgf.outermostConditional == nullptr)
      cgf.outermostConditional = this;
  }

  void endEvaluation() {
    assert(cgf.outermostConditional != nullptr);
    // Only clear the tracker if we are the one that installed it.
    if (cgf.outermostConditional == this)
      cgf.outermostConditional = nullptr;
  }

  /// Returns the insertion point which will be executed prior to each
  /// evaluation of the conditional code. In LLVM OG, this method
  /// is called getStartingBlock.
  mlir::OpBuilder::InsertPoint getInsertPoint() const { return savedIP; }
};
/// Bundles the values produced while emitting the two arms of a
/// conditional expression: an optional LValue per branch and the
/// resulting scalar value, when applicable.
struct ConditionalInfo {
  std::optional<LValue> lhs{};
  std::optional<LValue> rhs{};
  mlir::Value result{};
};
// Return true if we're currently emitting one branch or the other of a
// conditional expression (i.e. an active ConditionalEvaluation has installed
// itself as outermostConditional).
bool isInConditionalBranch() const { return outermostConditional != nullptr; }
/// Emit a store of \p value to \p addr at the insertion point saved by the
/// outermost active conditional, so the store executes unconditionally
/// before any of the conditional branches run.
void setBeforeOutermostConditional(mlir::Value value, Address addr) {
  assert(isInConditionalBranch());
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
  // Encode the destination alignment as a 64-bit integer attribute.
  mlir::IntegerAttr alignAttr = mlir::IntegerAttr::get(
      mlir::IntegerType::get(value.getContext(), 64),
      (uint64_t)addr.getAlignment().getAsAlign().value());
  builder.createStore(value.getLoc(), value, addr, /*isVolatile=*/false,
                      alignAttr);
}
// Points to the outermost active conditional control. This is used so that
// we know if a temporary should be destroyed conditionally. Maintained by
// ConditionalEvaluation::beginEvaluation/endEvaluation and temporarily
// cleared (then restored) by StmtExprEvaluation.
ConditionalEvaluation *outermostConditional = nullptr;
/// An RAII object to record that we're evaluating a statement
/// expression.
class StmtExprEvaluation {
CIRGenFunction &cgf;
/// We have to save the outermost conditional: cleanups in a
/// statement expression aren't conditional just because the
/// StmtExpr is.
ConditionalEvaluation *savedOutermostConditional;
public:
StmtExprEvaluation(CIRGenFunction &cgf)
: cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
cgf.outermostConditional = nullptr;
}
~StmtExprEvaluation() {
cgf.outermostConditional = savedOutermostConditional;
}
};
template <typename FuncTy>
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
const FuncTy &branchGenFunc);
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
const clang::Stmt *thenS,
const clang::Stmt *elseS);
/// Build a "reference" to a va_list; this is either the address or the value
/// of the expression, depending on how va_list is defined.
Address emitVAListRef(const Expr *e);
/// Emits the start of a CIR variable-argument operation (`cir.va_start`)
///
/// \param vaList A reference to the \c va_list as emitted by either
/// \c emitVAListRef or \c emitMSVAListRef.
///
/// \param count The number of arguments in \c vaList
void emitVAStart(mlir::Value vaList, mlir::Value count);
/// Emits the end of a CIR variable-argument operation (`cir.va_end`)
///
/// \param vaList A reference to the \c va_list as emitted by either
/// \c emitVAListRef or \c emitMSVAListRef.
void emitVAEnd(mlir::Value vaList);
/// Generate code to get an argument from the passed in pointer
/// and update it accordingly.
///
/// \param ve The \c VAArgExpr for which to generate code.
///
/// Note: this overload takes no separate va_list address parameter; the
/// \c va_list reference is derived from \p ve internally (via
/// \c emitVAListRef or \c emitMSVAListRef).
///
/// \returns SSA value with the argument.
mlir::Value emitVAArg(VAArgExpr *ve);
/// ----------------------
/// CIR build helpers
/// -----------------
public:
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
const Twine &name = "tmp",
mlir::Value arraySize = nullptr,
bool insertIntoFnEntryBlock = false);
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
const Twine &name = "tmp",
mlir::OpBuilder::InsertPoint ip = {},
mlir::Value arraySize = nullptr);
Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
const Twine &name = "tmp",
mlir::Value arraySize = nullptr,
Address *alloca = nullptr,
mlir::OpBuilder::InsertPoint ip = {});
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
mlir::Location loc,
const Twine &name = "tmp",
mlir::Value arraySize = nullptr,
mlir::OpBuilder::InsertPoint ip = {});
/// Create a temporary memory object of the given type, with
/// appropriate alignment, and cast it to the default address space. Returns
/// the original alloca instruction by \p alloca if it is not nullptr.
Address createMemTemp(QualType t, mlir::Location loc,
const Twine &name = "tmp", Address *alloca = nullptr,
mlir::OpBuilder::InsertPoint ip = {});
Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
const Twine &name = "tmp", Address *alloca = nullptr,
mlir::OpBuilder::InsertPoint ip = {});
//===--------------------------------------------------------------------===//
// OpenMP Emission
//===--------------------------------------------------------------------===//
public:
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
mlir::LogicalResult
emitOMPParallelForDirective(const OMPParallelForDirective &s);
mlir::LogicalResult
emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
mlir::LogicalResult
emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
mlir::LogicalResult
emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
mlir::LogicalResult
emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s);
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
mlir::LogicalResult
emitOMPTargetDataDirective(const OMPTargetDataDirective &s);
mlir::LogicalResult
emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s);
mlir::LogicalResult
emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s);
mlir::LogicalResult
emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s);
mlir::LogicalResult
emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s);
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
mlir::LogicalResult
emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s);
mlir::LogicalResult
emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s);
mlir::LogicalResult
emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s);
mlir::LogicalResult
emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s);
mlir::LogicalResult
emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s);
mlir::LogicalResult
emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s);
mlir::LogicalResult
emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
const OMPParallelMaskedTaskLoopDirective &s);
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(
const OMPParallelMaskedTaskLoopSimdDirective &s);
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
const OMPParallelMasterTaskLoopDirective &s);
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(
const OMPParallelMasterTaskLoopSimdDirective &s);
mlir::LogicalResult
emitOMPDistributeDirective(const OMPDistributeDirective &s);
mlir::LogicalResult emitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &s);
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(
const OMPDistributeParallelForSimdDirective &s);
mlir::LogicalResult
emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s);
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(
const OMPTargetParallelGenericLoopDirective &s);
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
const OMPTargetParallelForSimdDirective &s);
mlir::LogicalResult
emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s);
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
const OMPTargetTeamsGenericLoopDirective &s);
mlir::LogicalResult
emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s);
mlir::LogicalResult
emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s);
mlir::LogicalResult
emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s);
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(
const OMPTeamsDistributeParallelForSimdDirective &s);
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(
const OMPTeamsDistributeParallelForDirective &s);
mlir::LogicalResult
emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s);
mlir::LogicalResult
emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s);
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
const OMPTargetTeamsDistributeDirective &s);
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(
const OMPTargetTeamsDistributeParallelForDirective &s);
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(
const OMPTargetTeamsDistributeParallelForSimdDirective &s);
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &s);
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
mlir::LogicalResult
emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s);
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
mlir::LogicalResult
emitOMPInterchangeDirective(const OMPInterchangeDirective &s);
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d);
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d);
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d);
void emitOMPAllocateDecl(const OMPAllocateDecl &d);
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d);
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d);
void emitOMPRequiresDecl(const OMPRequiresDecl &d);
private:
template <typename Op>
void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
//===--------------------------------------------------------------------===//
// OpenACC Emission
//===--------------------------------------------------------------------===//
private:
template <typename Op>
Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
llvm::ArrayRef<const OpenACCClause *> clauses);
// Function to do the basic implementation of an operation with an Associated
// Statement. Models AssociatedStmtConstruct.
template <typename Op, typename TermOp>
mlir::LogicalResult
emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
OpenACCDirectiveKind dirKind,
llvm::ArrayRef<const OpenACCClause *> clauses,
const Stmt *associatedStmt);
template <typename Op, typename TermOp>
mlir::LogicalResult emitOpenACCOpCombinedConstruct(
mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
template <typename Op>
void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
ArrayRef<const OpenACCClause *> clauses);
// The second template argument doesn't need to be a template, since it should
// always be an mlir::acc::LoopOp, but as this is a template anyway, we make
// it a template argument as this way we can avoid including the OpenACC MLIR
// headers here. We will count on linker failures/explicit instantiation to
// ensure we don't mess this up, but it is only called from 1 place, and
// instantiated 3x.
template <typename ComputeOp, typename LoopOp>
void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
OpenACCDirectiveKind dirKind,
ArrayRef<const OpenACCClause *> clauses);
// The OpenACC LoopOp requires that we have auto, seq, or independent on all
// LoopOp operations for the 'none' device type case. This function checks if
// the LoopOp has one, else it updates it to have one.
void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
OpenACCDirectiveKind dk);
// The OpenACC 'cache' construct actually applies to the 'loop' if present. So
// keep track of the 'loop' so that we can add the cache vars to it correctly.
mlir::acc::LoopOp *activeLoopOp = nullptr;

// RAII helper that installs \p newOp as the active OpenACC loop for the
// duration of its scope and restores the previous value on destruction.
struct ActiveOpenACCLoopRAII {
  CIRGenFunction &cgf;
  // The loop that was active before this scope; restored by the destructor.
  mlir::acc::LoopOp *oldLoopOp;

  ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
      : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
    cgf.activeLoopOp = newOp;
  }
  ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
};

// Keep track of the last place we inserted a 'recipe' so that we can insert
// the next one in lexical order.
mlir::OpBuilder::InsertPoint lastRecipeLocation;
public:
// Helper type used to store the list of important information for a 'data'
// clause variable, or a 'cache' variable reference.
struct OpenACCDataOperandInfo {
  // Source location where the operand reference begins.
  mlir::Location beginLoc;
  // The value emitted for the referenced variable.
  mlir::Value varValue;
  // Name for the operand (e.g. used when building the resulting operation).
  std::string name;
  // The type of the original variable reference: that is, after 'bounds' have
  // removed pointers/array types/etc. So in the case of int arr[5], and a
  // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
  QualType origType;
  QualType baseType;
  // Bound values collected while walking the variable reference.
  llvm::SmallVector<mlir::Value> bounds;
  // The list of types that we found when going through the bounds, which we
  // can use to properly set the alloca section.
  llvm::SmallVector<QualType> boundTypes;
};
// Gets the collection of info required to lower an OpenACC clause or cache
// construct variable reference.
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e);
// Helper function to emit the integer expressions as required by an OpenACC
// clause/construct.
mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
// Helper function to emit an integer constant as an mlir int type, used for
// constants in OpenACC constructs/clauses.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
int64_t value);
mlir::LogicalResult
emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
mlir::LogicalResult
emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
mlir::LogicalResult
emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
mlir::LogicalResult
emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
mlir::LogicalResult
emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
mlir::LogicalResult
emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
mlir::LogicalResult
emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
mlir::LogicalResult
emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
void emitOpenACCRoutine(const OpenACCRoutineDecl &d);
/// Create a temporary memory object for the given aggregate type and wrap
/// it in an AggValueSlot. If \p alloca is non-null, it receives the
/// original (uncasted) alloca address via createMemTemp.
AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
                           const Twine &name = "tmp",
                           Address *alloca = nullptr) {
  assert(!cir::MissingFeatures::aggValueSlot());
  Address slotAddr = createMemTemp(ty, loc, name, alloca);
  return AggValueSlot::forAddr(slotAddr, ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::IsNotAliased,
                               AggValueSlot::DoesNotOverlap);
}
private:
QualType getVarArgType(const Expr *arg);
};
} // namespace clang::CIRGen
#endif