//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements semantic analysis for OpenMP directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Stack of data-sharing attributes for variables
//===----------------------------------------------------------------------===//
static Expr *CheckMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
OpenMPClauseKind CKind, bool NoDiagnose);
namespace {
/// \brief Default data sharing attributes, which can be applied to a directive.
enum DefaultDataSharingAttributes {
DSA_unspecified = 0, /// \brief Data sharing attribute not specified.
DSA_none = 1 << 0, /// \brief Default data sharing attribute 'none'.
DSA_shared = 1 << 1, /// \brief Default data sharing attribute 'shared'.
};
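// For illustration only: these values mirror the 'default' clause on a
// construct, e.g. in OpenMP source
//   #pragma omp parallel default(none) shared(a)
// where 'default(none)' requires every variable referenced in the region to
// have an explicitly determined data-sharing attribute.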
/// Attributes of the defaultmap clause.
enum DefaultMapAttributes {
DMA_unspecified, /// Default mapping is not specified.
DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
};
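// For illustration only: DMA_tofrom_scalar corresponds to the 'defaultmap'
// clause on a target construct, e.g. in OpenMP source
//   #pragma omp target defaultmap(tofrom:scalar)
// which makes scalar variables implicitly mapped 'tofrom' instead of being
// treated as firstprivate.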
/// \brief Stack for tracking declarations used in OpenMP directives and
/// clauses and their data-sharing attributes.
class DSAStackTy final {
public:
struct DSAVarData final {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, Expr *RefExpr,
DeclRefExpr *PrivateCopy, SourceLocation ImplicitDSALoc)
: DKind(DKind), CKind(CKind), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
typedef llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>
OperatorOffsetTy;
private:
struct DSAInfo final {
OpenMPClauseKind Attributes = OMPC_unknown;
/// Pointer to a reference expression and a flag which shows that the
/// variable is marked as lastprivate(true) or not (false).
llvm::PointerIntPair<Expr *, 1, bool> RefExpr;
DeclRefExpr *PrivateCopy = nullptr;
};
typedef llvm::DenseMap<ValueDecl *, DSAInfo> DeclSAMapTy;
typedef llvm::DenseMap<ValueDecl *, Expr *> AlignedMapTy;
typedef std::pair<unsigned, VarDecl *> LCDeclInfo;
typedef llvm::DenseMap<ValueDecl *, LCDeclInfo> LoopControlVariablesMapTy;
/// Struct that associates component lists with the clause kind where they are
/// found.
struct MappedExprComponentTy {
OMPClauseMappableExprCommon::MappableExprComponentLists Components;
OpenMPClauseKind Kind = OMPC_unknown;
};
typedef llvm::DenseMap<ValueDecl *, MappedExprComponentTy>
MappedExprComponentsTy;
typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
CriticalsWithHintsTy;
typedef llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>
DoacrossDependMapTy;
struct ReductionData {
typedef llvm::PointerEmbeddedInt<BinaryOperatorKind, 16> BOKPtrType;
SourceRange ReductionRange;
llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
ReductionData() = default;
void set(BinaryOperatorKind BO, SourceRange RR) {
ReductionRange = RR;
ReductionOp = BO;
}
void set(const Expr *RefExpr, SourceRange RR) {
ReductionRange = RR;
ReductionOp = RefExpr;
}
};
typedef llvm::DenseMap<ValueDecl *, ReductionData> DeclReductionMapTy;
struct SharingMapTy final {
DeclSAMapTy SharingMap;
DeclReductionMapTy ReductionMap;
AlignedMapTy AlignedMap;
MappedExprComponentsTy MappedExprComponents;
LoopControlVariablesMapTy LCVMap;
DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
SourceLocation DefaultAttrLoc;
DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
SourceLocation DefaultMapAttrLoc;
OpenMPDirectiveKind Directive = OMPD_unknown;
DeclarationNameInfo DirectiveName;
Scope *CurScope = nullptr;
SourceLocation ConstructLoc;
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
DoacrossDependMapTy DoacrossDepends;
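// For illustration only: DoacrossDepends entries come from OpenMP source such
// as
//   #pragma omp for ordered(1)
//   for (int i = 1; i < N; ++i) {
//   #pragma omp ordered depend(sink : i - 1)
//     // use results of iteration i - 1
//   #pragma omp ordered depend(source)
//   }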
/// \brief The pointer (Expr *) holds the optional argument of the 'ordered'
/// clause; the flag is true if the region has an 'ordered' clause, false
/// otherwise.
llvm::PointerIntPair<Expr *, 1, bool> OrderedRegion;
bool NowaitRegion = false;
bool CancelRegion = false;
unsigned AssociatedLoops = 1;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
ConstructLoc(Loc) {}
SharingMapTy() = default;
};
typedef SmallVector<SharingMapTy, 4> StackTy;
/// \brief Stack of used declarations and their data-sharing attributes.
DeclSAMapTy Threadprivates;
const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
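/// Stack of OpenMP region records; each element pairs a stack of directive
/// entries with the non-capturing function scope it was pushed in, so regions
/// started in different functions are kept separate (see pushFunction()).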
SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
/// \brief Kind of the clause currently being analyzed, or OMPC_unknown if no
/// clause is being parsed (i.e. not in clause parsing mode).
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
CriticalsWithHintsTy Criticals;
typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
DSAVarData getDSA(StackTy::reverse_iterator &Iter, ValueDecl *D);
/// \brief Checks if the variable is local to an OpenMP region.
bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
bool isStackEmpty() const {
return Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope ||
Stack.back().first.empty();
}
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
assert(isClauseParsingMode() && "Must be in clause parsing mode.");
return ClauseKindMode;
}
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
if (Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope)
Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
Stack.back().first.back().DefaultAttrLoc = Loc;
}
void pop() {
assert(!Stack.back().first.empty() &&
"Data-sharing attributes stack is empty!");
Stack.back().first.pop_back();
}
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
assert(!isa<CapturingScopeInfo>(CurFnScope));
CurrentNonCapturingFunctionScope = CurFnScope;
}
/// Pop region stack for non-capturing function.
void popFunction(const FunctionScopeInfo *OldFSI) {
if (!Stack.empty() && Stack.back().second == OldFSI) {
assert(Stack.back().first.empty());
Stack.pop_back();
}
CurrentNonCapturingFunctionScope = nullptr;
for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
if (!isa<CapturingScopeInfo>(FSI)) {
CurrentNonCapturingFunctionScope = FSI;
break;
}
}
}
void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) {
Criticals[D->getDirectiveName().getAsString()] = std::make_pair(D, Hint);
}
const std::pair<OMPCriticalDirective *, llvm::APSInt>
getCriticalWithHint(const DeclarationNameInfo &Name) const {
auto I = Criticals.find(Name.getAsString());
if (I != Criticals.end())
return I->second;
return std::make_pair(nullptr, llvm::APSInt());
}
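// For illustration only: entries in Criticals record the 'hint' clause of a
// named critical construct, e.g. in OpenMP source
//   #pragma omp critical (update) hint(omp_lock_hint_contended)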
/// \brief If an 'aligned' declaration for the given variable \a D was not seen
/// yet, add it and return NULL; otherwise return the previous occurrence's
/// expression for diagnostics.
Expr *addUniqueAligned(ValueDecl *D, Expr *NewDE);
/// \brief Register specified variable as loop control variable.
void addLoopControlVariable(ValueDecl *D, VarDecl *Capture);
/// \brief Check if the specified variable is a loop control variable for
/// current region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
LCDeclInfo isLoopControlVariable(ValueDecl *D);
/// \brief Check if the specified variable is a loop control variable for
/// parent region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
LCDeclInfo isParentLoopControlVariable(ValueDecl *D);
/// \brief Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
ValueDecl *getParentLoopControlVariable(unsigned I);
/// \brief Adds explicit data sharing attribute to the specified declaration.
void addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy = nullptr);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
void addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK);
/// Adds additional information for the reduction items with the reduction id
/// represented as reduction identifier.
void addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
const Expr *ReductionRef);
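// For illustration only: this additional reduction info is recorded for items
// of a 'task_reduction' clause on a taskgroup, e.g. in OpenMP source
//   #pragma omp taskgroup task_reduction(+ : sum)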
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
DSAVarData getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor);
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
DSAVarData getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor);
/// Return reduction reference expression for the current taskgroup.
Expr *getTaskgroupReductionRef() const {
assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
"taskgroup reference expression requested for non taskgroup "
"directive.");
return Stack.back().first.back().TaskgroupReductionRef;
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
/// descriptor variable at the \p Level of OpenMP regions.
bool isTaskgroupReductionRef(ValueDecl *VD, unsigned Level) const {
return Stack.back().first[Level].TaskgroupReductionRef &&
cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
->getDecl() == VD;
}
/// \brief Returns data sharing attributes from top of the stack for the
/// specified declaration.
DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
/// \brief Returns data-sharing attributes for the specified declaration.
DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent);
/// \brief Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in any directive which matches the
/// \a DPred predicate.
DSAVarData hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent);
/// \brief Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in the innermost directive which
/// matches the \a DPred predicate.
DSAVarData
hasInnermostDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent);
/// \brief Checks if the specified variable has explicit data-sharing
/// attributes which match the specified \a CPred predicate at the specified
/// OpenMP region.
bool hasExplicitDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
unsigned Level, bool NotLastprivate = false);
/// \brief Returns true if the directive at level \a Level matches the
/// specified \a DPred predicate.
bool hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
unsigned Level);
/// \brief Finds a directive which matches the specified \a DPred predicate.
bool hasDirective(const llvm::function_ref<bool(OpenMPDirectiveKind,
const DeclarationNameInfo &,
SourceLocation)> &DPred,
bool FromParent);
/// \brief Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
}
/// \brief Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
return Stack.back().first[Level].Directive;
}
/// \brief Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return OMPD_unknown;
return std::next(Stack.back().first.rbegin())->Directive;
}
/// \brief Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_none;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
/// \brief Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_shared;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
/// Set default data mapping attribute to 'tofrom:scalar'.
void setDefaultDMAToFromScalar(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
Stack.back().first.back().DefaultMapAttrLoc = Loc;
}
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
: Stack.back().first.back().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().DefaultAttrLoc;
}
DefaultMapAttributes getDefaultDMA() const {
return isStackEmpty() ? DMA_unspecified
: Stack.back().first.back().DefaultMapAttr;
}
DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
return Stack.back().first[Level].DefaultMapAttr;
}
SourceLocation getDefaultDMALocation() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().DefaultMapAttrLoc;
}
/// \brief Checks if the specified variable is a threadprivate.
bool isThreadPrivate(VarDecl *D) {
DSAVarData DVar = getTopDSA(D, false);
return isOpenMPThreadPrivate(DVar.CKind);
}
/// \brief Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, Expr *Param) {
assert(!isStackEmpty());
Stack.back().first.back().OrderedRegion.setInt(IsOrdered);
Stack.back().first.back().OrderedRegion.setPointer(Param);
}
/// \brief Returns true if the parent region is ordered (has an associated
/// 'ordered' clause), false otherwise.
bool isParentOrderedRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt();
}
/// \brief Returns optional parameter for the ordered region.
Expr *getParentOrderedRegionParam() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return nullptr;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer();
}
/// \brief Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
assert(!isStackEmpty());
Stack.back().first.back().NowaitRegion = IsNowait;
}
/// \brief Returns true if the parent region is nowait (has an associated
/// 'nowait' clause), false otherwise.
bool isParentNowaitRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->NowaitRegion;
}
/// \brief Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
auto &StackElemRef = *std::next(Stack.back().first.rbegin());
StackElemRef.CancelRegion |= Cancel;
}
}
/// \brief Return true if current region has inner cancel construct.
bool isCancelRegion() const {
return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
}
/// \brief Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
assert(!isStackEmpty());
Stack.back().first.back().AssociatedLoops = Val;
}
/// \brief Return collapse value for region.
unsigned getAssociatedLoops() const {
return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
}
/// \brief Marks current target region as one with closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
TeamsRegionLoc;
}
}
/// \brief Returns true if the current region has a closely nested teams
/// region.
bool hasInnerTeamsRegion() const {
return getInnerTeamsRegionLoc().isValid();
}
/// \brief Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().InnerTeamsRegionLoc;
}
Scope *getCurScope() const {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
Scope *getCurScope() {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
SourceLocation getConstructLoc() {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().ConstructLoc;
}
/// Run the check specified in \a Check on all component lists and return true
/// if any issue is found.
bool checkMappableExprComponentListsForDecl(
ValueDecl *VD, bool CurrentRegionOnly,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)> &Check) {
if (isStackEmpty())
return false;
auto SI = Stack.back().first.rbegin();
auto SE = Stack.back().first.rend();
if (SI == SE)
return false;
if (CurrentRegionOnly) {
SE = std::next(SI);
} else {
++SI;
}
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
for (auto &L : MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
}
return false;
}
/// Run the check specified in \a Check on all component lists at the given
/// level and return true if any issue is found.
bool checkMappableExprComponentListsForDeclAtLevel(
ValueDecl *VD, unsigned Level,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)> &Check) {
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
auto MI = StartI->MappedExprComponents.find(VD);
if (MI != StartI->MappedExprComponents.end())
for (auto &L : MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
return false;
}
/// Create a new mappable expression component list associated with a given
/// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
assert(!isStackEmpty() &&
"Not expecting to retrieve components from a empty stack!");
auto &MEC = Stack.back().first.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
MEC.Kind = WhereFoundClauseKind;
}
unsigned getNestingLevel() const {
assert(!isStackEmpty());
return Stack.back().first.size() - 1;
}
void addDoacrossDependClause(OMPDependClause *C, OperatorOffsetTy &OpsOffs) {
assert(!isStackEmpty() && Stack.back().first.size() > 1);
auto &StackElem = *std::next(Stack.back().first.rbegin());
assert(isOpenMPWorksharingDirective(StackElem.Directive));
StackElem.DoacrossDepends.insert({C, OpsOffs});
}
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
assert(!isStackEmpty());
auto &StackElem = Stack.back().first.back();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
auto &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
StackElem.DoacrossDepends.end());
}
};
bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
}
} // namespace
static Expr *getExprAsWritten(Expr *E) {
if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
E = ExprTemp->getSubExpr();
if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
while (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
E = Binder->getSubExpr();
if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
return E->IgnoreParens();
}
static ValueDecl *getCanonicalDecl(ValueDecl *D) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
D = ME->getMemberDecl();
auto *VD = dyn_cast<VarDecl>(D);
auto *FD = dyn_cast<FieldDecl>(D);
if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
} else {
assert(FD);
FD = FD->getCanonicalDecl();
D = FD;
}
return D;
}
DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
ValueDecl *D) {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
if (isStackEmpty() || Iter == Stack.back().first.rend()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// File-scope or namespace-scope variables referenced in called routines
// in the region are shared unless they appear in a threadprivate
// directive.
if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(D))
DVar.CKind = OMPC_shared;
// OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// Variables with static storage duration that are declared in called
// routines in the region are shared.
if (VD && VD->hasGlobalStorage())
DVar.CKind = OMPC_shared;
// Non-static data members are shared by default.
if (FD)
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables with automatic storage duration that are declared in a scope
// inside the construct are private.
if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
(VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
DVar.CKind = OMPC_private;
return DVar;
}
DVar.DKind = Iter->Directive;
// Explicitly specified attributes and local variables with predetermined
// attributes.
if (Iter->SharingMap.count(D)) {
DVar.RefExpr = Iter->SharingMap[D].RefExpr.getPointer();
DVar.PrivateCopy = Iter->SharingMap[D].PrivateCopy;
DVar.CKind = Iter->SharingMap[D].Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, implicitly determined, p.1]
// In a parallel or task construct, the data-sharing attributes of these
// variables are determined by the default clause, if present.
switch (Iter->DefaultAttr) {
case DSA_shared:
DVar.CKind = OMPC_shared;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
case DSA_none:
return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
// In a parallel construct, if no default clause is present, these
// variables are shared.
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
if (isOpenMPParallelDirective(DVar.DKind) ||
isOpenMPTeamsDirective(DVar.DKind)) {
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.4]
// In a task construct, if no default clause is present, a variable that in
// the enclosing context is determined to be shared by all implicit tasks
// bound to the current team is shared.
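// For illustration only: in OpenMP source such as
//   int x = 0;
//   #pragma omp parallel shared(x)
//   #pragma omp task
//   x++;
// 'x' is shared by all implicit tasks of the team, so it is also implicitly
// shared inside the explicit task.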
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
auto I = Iter, E = Stack.back().first.rend();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
// Referenced in a Construct, implicitly determined, p.6]
// In a task construct, if no default clause is present, a variable
// whose data-sharing attribute is not determined by the rules above is
// firstprivate.
DVarTemp = getDSA(I, D);
if (DVarTemp.CKind != OMPC_shared) {
DVar.RefExpr = nullptr;
DVar.CKind = OMPC_firstprivate;
return DVar;
}
} while (I != E && !isParallelOrTaskRegion(I->Directive));
DVar.CKind =
(DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
return DVar;
}
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.3]
// For constructs other than task, if no default clause is present, these
// variables inherit their data-sharing attributes from the enclosing
// context.
return getDSA(++Iter, D);
}
Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) {
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto &StackElem = Stack.back().first.back();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
StackElem.AlignedMap[D] = NewDE;
return nullptr;
}
assert(It->second && "Unexpected nullptr expr in the aligned map");
return It->second;
}
void DSAStackTy::addLoopControlVariable(ValueDecl *D, VarDecl *Capture) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto &StackElem = Stack.back().first.back();
StackElem.LCVMap.insert(
{D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture)});
}
DSAStackTy::LCDeclInfo DSAStackTy::isLoopControlVariable(ValueDecl *D) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto &StackElem = Stack.back().first.back();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
DSAStackTy::LCDeclInfo DSAStackTy::isParentLoopControlVariable(ValueDecl *D) {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto &StackElem = *std::next(Stack.back().first.rbegin());
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
auto &StackElem = *std::next(Stack.back().first.rbegin());
if (StackElem.LCVMap.size() < I)
return nullptr;
for (auto &Pair : StackElem.LCVMap)
if (Pair.second.first == I)
return Pair.first;
return nullptr;
}
void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
auto &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
auto &Data = Stack.back().first.back().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
(isLoopControlVariable(D).first && A == OMPC_private));
if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
Data.RefExpr.setInt(/*IntVal=*/true);
return;
}
const bool IsLastprivate =
A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
auto &Data = Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
}
}
}
/// \brief Build a variable declaration for OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
StringRef Name, const AttrVec *Attrs = nullptr) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
VarDecl *Decl =
VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
if (Attrs) {
for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
I != E; ++I)
Decl->addAttr(*I);
}
Decl->setImplicit();
return Decl;
}
static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
SourceLocation Loc,
bool RefersToCapture = false) {
D->setReferenced();
D->markUsed(S.Context);
return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
SourceLocation(), D, RefersToCapture, Loc, Ty,
VK_LValue);
}
void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
auto &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
auto *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
const Expr *ReductionRef) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
auto &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
auto *VD = buildVarDecl(SemaRef, SR.getBegin(), SemaRef.Context.VoidPtrTy,
".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
DSAStackTy::DSAVarData
DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
for (auto I = std::next(Stack.back().first.rbegin(), 1),
E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
auto &Data = I->SharingMap[D];
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
auto &ReductionData = I->ReductionMap[D];
if (!ReductionData.ReductionOp ||
ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
DSAStackTy::DSAVarData
DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
for (auto I = std::next(Stack.back().first.rbegin(), 1),
E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
auto &Data = I->SharingMap[D];
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
auto &ReductionData = I->ReductionMap[D];
if (!ReductionData.ReductionOp ||
!ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
D = D->getCanonicalDecl();
if (!isStackEmpty() && Stack.back().first.size() > 1) {
reverse_iterator I = Iter, E = Stack.back().first.rend();
Scope *TopScope = nullptr;
while (I != E && !isParallelOrTaskRegion(I->Directive))
++I;
if (I == E)
return false;
TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
Scope *CurScope = getCurScope();
while (CurScope != TopScope && !CurScope->isDeclScope(D))
CurScope = CurScope->getParent();
return CurScope != TopScope;
}
return false;
}
DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
D = getCanonicalDecl(D);
DSAVarData DVar;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables appearing in threadprivate directives are threadprivate.
auto *VD = dyn_cast<VarDecl>(D);
if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
SemaRef.getLangOpts().OpenMPUseTLS &&
SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
(VD && VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
addDSA(D, buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
D->getLocation()),
OMPC_threadprivate);
}
auto TI = Threadprivates.find(D);
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
} else if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
DVar.RefExpr = buildDeclRefExpr(
SemaRef, VD, D->getType().getNonReferenceType(),
VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
}
if (isStackEmpty())
// Not in OpenMP execution region and top scope was already checked.
return DVar;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.4]
// Static data members are shared.
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.7]
// Variables with static storage duration that are declared in a scope
// inside the construct are shared.
auto &&MatchesAlways = [](OpenMPDirectiveKind) -> bool { return true; };
if (VD && VD->isStaticDataMember()) {
DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
return DVar;
DVar.CKind = OMPC_shared;
return DVar;
}
QualType Type = D->getType().getNonReferenceType().getCanonicalType();
bool IsConstant = Type.isConstant(SemaRef.getASTContext());
Type = SemaRef.getASTContext().getBaseElementType(Type);
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.6]
// Variables with const qualified type having no mutable member are
// shared.
CXXRecordDecl *RD =
SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
if (auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
if (auto *CTD = CTSD->getSpecializedTemplate())
RD = CTD->getTemplatedDecl();
if (IsConstant &&
!(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
RD->hasMutableFields())) {
// Variables with const-qualified type having no mutable member may be
// listed in a firstprivate clause, even if they are static data members.
DSAVarData DVarTemp = hasDSA(
D, [](OpenMPClauseKind C) -> bool { return C == OMPC_firstprivate; },
MatchesAlways, FromParent);
if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
return DVar;
DVar.CKind = OMPC_shared;
return DVar;
}
// Explicitly specified attributes and local variables with predetermined
// attributes.
auto I = Stack.back().first.rbegin();
auto EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
if (I->SharingMap.count(D)) {
DVar.RefExpr = I->SharingMap[D].RefExpr.getPointer();
DVar.PrivateCopy = I->SharingMap[D].PrivateCopy;
DVar.CKind = I->SharingMap[D].Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
}
return DVar;
}
DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
bool FromParent) {
if (isStackEmpty()) {
StackTy::reverse_iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
auto StartI = Stack.back().first.rbegin();
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
return getDSA(StartI, D);
}
DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent) {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
auto I = Stack.back().first.rbegin();
auto EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
for (; I != EndI; std::advance(I, 1)) {
if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
continue;
auto NewI = I;
DSAVarData DVar = getDSA(NewI, D);
if (I == NewI && CPred(DVar.CKind))
return DVar;
}
return {};
}
DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent) {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
auto StartI = Stack.back().first.rbegin();
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
if (StartI == EndI || !DPred(StartI->Directive))
return {};
auto NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
unsigned Level, bool NotLastprivate) {
if (isStackEmpty())
return false;
D = getCanonicalDecl(D);
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
return (StartI->SharingMap.count(D) > 0) &&
StartI->SharingMap[D].RefExpr.getPointer() &&
CPred(StartI->SharingMap[D].Attributes) &&
(!NotLastprivate || !StartI->SharingMap[D].RefExpr.getInt());
}
bool DSAStackTy::hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
unsigned Level) {
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
return DPred(StartI->Directive);
}
bool DSAStackTy::hasDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind,
const DeclarationNameInfo &, SourceLocation)>
&DPred,
bool FromParent) {
// We look only in the enclosing region.
if (isStackEmpty())
return false;
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
for (auto I = StartI, EE = EndI; I != EE; ++I) {
if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
return true;
}
return false;
}
void Sema::InitDataSharingAttributesStack() {
VarDataSharingAttributesStack = new DSAStackTy(*this);
}
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
void Sema::pushOpenMPFunctionRegion() {
DSAStack->pushFunction();
}
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
auto &Ctx = getASTContext();
bool IsByRef = true;
// Find the directive that is associated with the provided scope.
D = cast<ValueDecl>(D->getCanonicalDecl());
auto Ty = D->getType();
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
// This table summarizes how a given variable should be passed to the device
// given its type and the clauses where it appears. This table is based on
// the description in OpenMP 4.5 [2.10.4, target Construct] and
// OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
//
// =========================================================================
// | type | defaultmap | pvt | first | is_device_ptr | map | res. |
// | |(tofrom:scalar)| | pvt | | | |
// =========================================================================
// | scl | | | | - | | bycopy|
// | scl | | - | x | - | - | bycopy|
// | scl | | x | - | - | - | null |
// | scl | x | | | - | | byref |
// | scl | x | - | x | - | - | bycopy|
// | scl | x | x | - | - | - | null |
// | scl | | - | - | - | x | byref |
// | scl | x | - | - | - | x | byref |
//
// | agg | n.a. | | | - | | byref |
// | agg | n.a. | - | x | - | - | byref |
// | agg | n.a. | x | - | - | - | null |
// | agg | n.a. | - | - | - | x | byref |
// | agg | n.a. | - | - | - | x[] | byref |
//
// | ptr | n.a. | | | - | | bycopy|
// | ptr | n.a. | - | x | - | - | bycopy|
// | ptr | n.a. | x | - | - | - | null |
// | ptr | n.a. | - | - | - | x | byref |
// | ptr | n.a. | - | - | - | x[] | bycopy|
// | ptr | n.a. | - | - | x | | bycopy|
// | ptr | n.a. | - | - | x | x | bycopy|
// | ptr | n.a. | - | - | x | x[] | bycopy|
// =========================================================================
// Legend:
// scl - scalar
// ptr - pointer
// agg - aggregate
// x - applies
// - - invalid in this combination
// [] - mapped with an array section
// byref - should be mapped by reference
// bycopy - should be passed by value (copied)
// null - initialize a local variable to null on the device
//
// Observations:
// - All scalar declarations that show up in a map clause have to be passed
// by reference, because they may have been mapped in the enclosing data
// environment.
// - If the scalar value does not fit in a uintptr, it has to be passed by
// reference, regardless of the result in the table above.
// - For pointers mapped by value that have either an implicit map or an
// array section, the runtime library may pass the NULL value to the
// device instead of the value passed to it by the compiler.
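// For illustration only, matching rows of the table above for 'int s; int *p;'
// in OpenMP source:
//   #pragma omp target map(s)            // 's' is passed byref.
//   #pragma omp target firstprivate(s)   // 's' is passed bycopy.
//   #pragma omp target map(p[0:10])      // 'p' is passed bycopy.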
if (Ty->isReferenceType())
Ty = Ty->castAs<ReferenceType>()->getPointeeType();
// Locate map clauses and see if the variable being captured is referred to
// in any of those clauses. Here we only care about variables, not fields,
// because fields are part of aggregates.
bool IsVariableUsedInMapClause = false;
bool IsVariableAssociatedWithSection = false;
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, Level, [&](OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
// Only the map clause information influences how a variable is
// captured. E.g. is_device_ptr does not require changing the default
// behavior.
if (WhereFoundClauseKind != OMPC_map)
return false;
auto EI = MapExprComponents.rbegin();
auto EE = MapExprComponents.rend();
assert(EI != EE && "Invalid map expression!");
if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
++EI;
if (EI == EE)
return false;
if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
}
// Keep looking for more map info.
return false;
});
if (IsVariableUsedInMapClause) {
// If variable is identified in a map clause it is always captured by
// reference except if it is a pointer that is dereferenced somehow.
IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
} else {
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
IsByRef =
!Ty->isScalarType() ||
DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
}
}
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
!DSAStack->hasExplicitDSA(
D,
[](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
Level, /*NotLastprivate=*/true) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
!cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
}
// When passing data by copy, we need to make sure it fits the uintptr size
// and alignment, because the runtime library only deals with uintptr types.
// If it does not fit the uintptr size, we need to pass the data by reference
// instead.
if (!IsByRef &&
(Ctx.getTypeSizeInChars(Ty) >
Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
return IsByRef;
}
unsigned Sema::getOpenMPNestingLevel() const {
assert(getLangOpts().OpenMP);
return DSAStack->getNestingLevel();
}
bool Sema::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
DSAStack->hasDirective(
[](OpenMPDirectiveKind K, const DeclarationNameInfo &,
SourceLocation) -> bool {
return isOpenMPTargetExecutionDirective(K);
},
false);
}
VarDecl *Sema::IsOpenMPCapturedDecl(ValueDecl *D) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
// If we are attempting to capture a global variable in a directive with
// 'target' we return true so that this global is also mapped to the device.
//
// FIXME: If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured. Therefore, an extra check has to be
// inserted here once support for 'declare target' is added.
//
auto *VD = dyn_cast<VarDecl>(D);
if (VD && !VD->hasLocalStorage() && isInOpenMPTargetExecutionDirective())
return VD;
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
DSAStack->getParentDirective() != OMPD_unknown)) {
auto &&Info = DSAStack->isLoopControlVariable(D);
if (Info.first ||
(VD && VD->hasLocalStorage() &&
isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
auto DVarPrivate = DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
DVarPrivate = DSAStack->hasDSA(
D, isOpenMPPrivate, [](OpenMPDirectiveKind) -> bool { return true; },
DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown)
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
}
void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const {
SmallVector<OpenMPDirectiveKind, 4> Regions;
getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
FunctionScopesIndex -= Regions.size();
}
bool Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) -> bool { return K == OMPC_private; },
Level) ||
(DSAStack->isClauseParsingMode() &&
DSAStack->getClauseParsingMode() == OMPC_private) ||
// Consider the taskgroup reduction descriptor variable private to avoid
// possible capture in the region.
(DSAStack->hasExplicitDirective(
[](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
Level) &&
DSAStack->isTaskgroupReductionRef(D, Level));
}
void Sema::setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
OpenMPClauseKind OMPC = OMPC_unknown;
for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
const unsigned NewLevel = I - 1;
if (DSAStack->hasExplicitDSA(D,
[&OMPC](const OpenMPClauseKind K) {
if (isOpenMPPrivate(K)) {
OMPC = K;
return true;
}
return false;
},
NewLevel))
break;
if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, NewLevel,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind) { return true; })) {
OMPC = OMPC_map;
break;
}
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
NewLevel)) {
OMPC = OMPC_firstprivate;
break;
}
}
if (OMPC != OMPC_unknown)
FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
}
bool Sema::isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true if the declaration is a global that is captured by the target
// region at the given level.
auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
Level);
}
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
DSAStack->push(DKind, DirName, CurScope, Loc);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
}
void Sema::StartOpenMPClause(OpenMPClauseKind K) {
DSAStack->setClauseParsingMode(K);
}
void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
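// For illustration only: in OpenMP source such as
//   #pragma omp parallel for lastprivate(s)
// a class-type 's' needs an accessible default constructor for the private
// copy built below, unless 's' is also listed in a firstprivate clause.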
if (auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
for (auto *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
for (auto *DE : Clause->varlists()) {
if (DE->isValueDependent() || DE->isTypeDependent()) {
PrivateCopies.push_back(nullptr);
continue;
}
auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
VarDecl *VD = cast<VarDecl>(DRE->getDecl());
QualType Type = VD->getType().getNonReferenceType();
auto DVar = DSAStack->getTopDSA(VD, false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate helper private variable and initialize it with the
// default value. The address of the original variable is replaced
// by the address of the new private variable in CodeGen. This new
// variable is not added to IdResolver, so the code in the OpenMP
// region uses original variable for proper diagnostics.
auto *VDPrivate = buildVarDecl(
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr);
ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl())
continue;
PrivateCopies.push_back(buildDeclRefExpr(
*this, VDPrivate, DE->getType(), DE->getExprLoc()));
} else {
// The variable is also firstprivate, so the initialization sequence for
// the private copy has already been generated.
PrivateCopies.push_back(nullptr);
}
}
// Set initializers to private copies if no errors were found.
if (PrivateCopies.size() == Clause->varlist_size())
Clause->setPrivateCopies(PrivateCopies);
}
}
}
DSAStack->pop();
DiscardCleanupsInEvaluationContext();
PopExpressionEvaluationContext();
}
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *NumIterations, Sema &SemaRef,
Scope *S, DSAStackTy *Stack);
namespace {
class VarDeclFilterCCC : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
};
class VarOrFuncDeclFilterCCC : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
};
} // namespace
ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
if (Lookup.isAmbiguous())
return ExprError();
VarDecl *VD;
if (!Lookup.isSingleResult()) {
if (TypoCorrection Corrected = CorrectTypo(
Id, LookupOrdinaryName, CurScope, nullptr,
llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(Lookup.empty()
? diag::err_undeclared_var_use_suggest
: diag::err_omp_expected_var_arg_suggest)
<< Id.getName());
VD = Corrected.getCorrectionDeclAs<VarDecl>();
} else {
Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
: diag::err_omp_expected_var_arg)
<< Id.getName();
return ExprError();
}
} else {
if (!(VD = Lookup.getAsSingle<VarDecl>())) {
Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
return ExprError();
}
}
Lookup.suppressDiagnostics();
// OpenMP [2.9.2, Syntax, C/C++]
// Variables must be file-scope, namespace-scope, or static block-scope.
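// For illustration only: the following is rejected here because 'x' has
// automatic storage duration:
//   void f() {
//     int x;
//   #pragma omp threadprivate(x) // error: 'x' is not file-scope,
//   }                            // namespace-scope, or static block-scope.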
if (!VD->hasGlobalStorage()) {
Diag(Id.getLoc(), diag::err_omp_global_var_arg)
<< getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
VarDecl *CanonicalVD = VD->getCanonicalDecl();
NamedDecl *ND = cast<NamedDecl>(CanonicalVD);
// OpenMP [2.9.2, Restrictions, C/C++, p.2]
// A threadprivate directive for file-scope variables must appear outside
// any definition or declaration.
if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
!getCurLexicalContext()->isTranslationUnit()) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.3]
// A threadprivate directive for static class member variables must appear
// in the class definition, in the same scope in which the member
// variables are declared.
if (CanonicalVD->isStaticDataMember() &&
!CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.4]
// A threadprivate directive for namespace-scope variables must appear
// outside any definition or declaration other than the namespace
// definition itself.
if (CanonicalVD->getDeclContext()->isNamespace() &&
(!getCurLexicalContext()->isFileContext() ||
!getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.6]
// A threadprivate directive for static block-scope variables must appear
// in the scope of the variable and not in a nested scope.
if (CanonicalVD->isStaticLocal() && CurScope &&
!isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
// A threadprivate directive must lexically precede all references to any
// of the variables in its list.
if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
Diag(Id.getLoc(), diag::err_omp_var_used)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
return ExprError();
}
QualType ExprType = VD->getType().getNonReferenceType();
return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
SourceLocation(), VD,
/*RefersToEnclosingVariableOrCapture=*/false,
Id.getLoc(), ExprType, VK_LValue);
}
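// Builds the implicit OMPThreadPrivateDecl for a
// '#pragma omp threadprivate(...)' directive, adds it to the current
// declaration context and returns it as a declaration group. Returns a null
// group if every listed variable was rejected by CheckOMPThreadPrivateDecl.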
Sema::DeclGroupPtrTy
Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList) {
if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
CurContext->addDecl(D);
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
return nullptr;
}
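// The checker below detects threadprivate variables whose initializer refers
// to a variable with local storage, which the runtime does not support.
// Illustrative sketch of a rejected case:
//   void foo() {
//     int Local = 0;
//     static int S = Local;
//     #pragma omp threadprivate(S) // diagnosed: the initializer of 'S'
//                                  // references the local variable 'Local'.
//   }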
namespace {
class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> {
Sema &SemaRef;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getLocStart(),
diag::err_omp_local_var_in_threadprivate_init)
<< E->getSourceRange();
SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
<< VD << VD->getSourceRange();
return true;
}
}
return false;
}
bool VisitStmt(const Stmt *S) {
for (auto Child : S->children()) {
if (Child && Visit(Child))
return true;
}
return false;
}
explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
};
} // namespace
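// Validates each variable listed in a threadprivate directive against the
// restrictions handled below (complete non-reference type, no pre-existing
// thread-local storage unless TLS-based threadprivate is in use, no
// initializer referencing local storage) and builds the OMPThreadPrivateDecl
// for the variables that pass.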
OMPThreadPrivateDecl *
Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
SmallVector<Expr *, 8> Vars;
for (auto &RefExpr : VarList) {
DeclRefExpr *DE = cast<DeclRefExpr>(RefExpr);
VarDecl *VD = cast<VarDecl>(DE->getDecl());
SourceLocation ILoc = DE->getExprLoc();
// Mark variable as used.
VD->setReferenced();
VD->markUsed(Context);
QualType QType = VD->getType();
if (QType->isDependentType() || QType->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have an incomplete type.
if (RequireCompleteType(ILoc, VD->getType(),
diag::err_omp_threadprivate_incomplete_type)) {
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have a reference type.
if (VD->getType()->isReferenceType()) {
Diag(ILoc, diag::err_omp_ref_type_arg)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if this is a TLS variable. If TLS is not supported, produce the
// corresponding diagnostic.
if ((VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
getLangOpts().OpenMPUseTLS &&
getASTContext().getTargetInfo().isTLSSupported())) ||
(VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
!VD->isLocalVarDecl())) {
Diag(ILoc, diag::err_omp_var_thread_local)
<< VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if the initial value of the threadprivate variable references a
// variable with local storage (this is not supported by the runtime).
if (auto Init = VD->getAnyInitializer()) {
LocalVarRefChecker Checker(*this);
if (Checker.Visit(Init))
continue;
}
Vars.push_back(RefExpr);
DSAStack->addDSA(VD, DE, OMPC_threadprivate);
VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
Context, SourceRange(Loc, Loc)));
if (auto *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPThreadPrivate(VD);
}
OMPThreadPrivateDecl *D = nullptr;
if (!Vars.empty()) {
D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
Vars);
D->setAccess(AS_public);
}
return D;
}
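// Emits a note that explains where the data-sharing attribute of the given
// declaration comes from: the explicit clause recorded in DVar if there is
// one, otherwise the predetermined rule that applies (loop iteration
// variable, static data member, firstprivate in a task, etc.) or the
// location of the implicit DSA.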
static void ReportOriginalDSA(Sema &SemaRef, DSAStackTy *Stack,
const ValueDecl *D, DSAStackTy::DSAVarData DVar,
bool IsLoopIterVar = false) {
if (DVar.RefExpr) {
SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
return;
}
enum {
PDSA_StaticMemberShared,
PDSA_StaticLocalVarShared,
PDSA_LoopIterVarPrivate,
PDSA_LoopIterVarLinear,
PDSA_LoopIterVarLastprivate,
PDSA_ConstVarShared,
PDSA_GlobalVarShared,
PDSA_TaskVarFirstprivate,
PDSA_LocalVarPrivate,
PDSA_Implicit
} Reason = PDSA_Implicit;
bool ReportHint = false;
auto ReportLoc = D->getLocation();
auto *VD = dyn_cast<VarDecl>(D);
if (IsLoopIterVar) {
if (DVar.CKind == OMPC_private)
Reason = PDSA_LoopIterVarPrivate;
else if (DVar.CKind == OMPC_lastprivate)
Reason = PDSA_LoopIterVarLastprivate;
else
Reason = PDSA_LoopIterVarLinear;
} else if (isOpenMPTaskingDirective(DVar.DKind) &&
DVar.CKind == OMPC_firstprivate) {
Reason = PDSA_TaskVarFirstprivate;
ReportLoc = DVar.ImplicitDSALoc;
} else if (VD && VD->isStaticLocal())
Reason = PDSA_StaticLocalVarShared;
else if (VD && VD->isStaticDataMember())
Reason = PDSA_StaticMemberShared;
else if (VD && VD->isFileVarDecl())
Reason = PDSA_GlobalVarShared;
else if (D->getType().isConstant(SemaRef.getASTContext()))
Reason = PDSA_ConstVarShared;
else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
ReportHint = true;
Reason = PDSA_LocalVarPrivate;
}
if (Reason != PDSA_Implicit) {
SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
<< Reason << ReportHint
<< getOpenMPDirectiveName(Stack->getCurrentDirective());
} else if (DVar.ImplicitDSALoc.isValid()) {
SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
}
}
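// The visitor below walks the statement captured by an OpenMP directive and
// collects the expressions that must become implicitly firstprivate or
// implicitly mapped, as well as the variables that violate default(none).
// Illustrative sketch: in
//   #pragma omp task
//   ++X;
// a variable X with no explicit or predetermined data-sharing attribute that
// is not shared is recorded as implicitly firstprivate for the task region.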
namespace {
class DSAAttrChecker : public StmtVisitor<DSAAttrChecker, void> {
DSAStackTy *Stack;
Sema &SemaRef;
bool ErrorFound;
CapturedStmt *CS;
llvm::SmallVector<Expr *, 8> ImplicitFirstprivate;
llvm::SmallVector<Expr *, 8> ImplicitMap;
llvm::DenseMap<ValueDecl *, Expr *> VarsWithInheritedDSA;
llvm::DenseSet<ValueDecl *> ImplicitDeclarations;
public:
void VisitDeclRefExpr(DeclRefExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
return;
auto DVar = Stack->getTopDSA(VD, false);
// Check if the variable has an explicit DSA set and stop the analysis if so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
return;
// Skip internally declared static variables.
if (VD->hasGlobalStorage() && !CS->capturesVariable(VD))
return;
auto ELoc = E->getExprLoc();
auto DKind = Stack->getCurrentDirective();
// The default(none) clause requires that each variable that is referenced
// in the construct, and does not have a predetermined data-sharing
// attribute, must have its data-sharing attribute explicitly determined
// by being listed in a data-sharing attribute clause.
if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
isParallelOrTaskRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
VarsWithInheritedDSA[VD] = E;
return;
}
if (isOpenMPTargetExecutionDirective(DKind) &&
!Stack->isLoopControlVariable(VD).first) {
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
// The variable is used if it has been marked as an array, an array
// section or the variable itself.
return StackComponents.size() == 1 ||
std::all_of(
std::next(StackComponents.rbegin()),
StackComponents.rend(),
[](const OMPClauseMappableExprCommon::
MappableComponent &MC) {
return MC.getAssociatedDeclaration() ==
nullptr &&
(isa<OMPArraySectionExpr>(
MC.getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(
MC.getAssociatedExpression()));
});
})) {
bool IsFirstprivate = false;
// By default lambdas are captured as firstprivates.
if (const auto *RD =
VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
IsFirstprivate = RD->isLambda();
IsFirstprivate =
IsFirstprivate ||
(VD->getType().getNonReferenceType()->isScalarType() &&
Stack->getDefaultDMA() != DMA_tofrom_scalar);
if (IsFirstprivate)
ImplicitFirstprivate.emplace_back(E);
else
ImplicitMap.emplace_back(E);
return;
}
}
// OpenMP [2.9.3.6, Restrictions, p.2]
// A list item that appears in a reduction clause of the innermost
// enclosing worksharing or parallel construct may not be accessed in an
// explicit task.
DVar = Stack->hasInnermostDSA(
VD, [](OpenMPClauseKind C) -> bool { return C == OMPC_reduction; },
[](OpenMPDirectiveKind K) -> bool {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
/*FromParent=*/true);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
ReportOriginalDSA(SemaRef, Stack, VD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(VD).first)
ImplicitFirstprivate.push_back(E);
}
}
void VisitMemberExpr(MemberExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
return;
auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
if (!FD)
return;
auto DVar = Stack->getTopDSA(FD, false);
// Check if the variable has an explicit DSA set and stop the analysis
// if so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
return;
if (isOpenMPTargetExecutionDirective(DKind) &&
!Stack->isLoopControlVariable(FD).first &&
!Stack->checkMappableExprComponentListsForDecl(
FD, /*CurrentRegionOnly=*/true,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
return isa<CXXThisExpr>(
cast<MemberExpr>(
StackComponents.back().getAssociatedExpression())
->getBase()
->IgnoreParens());
})) {
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
// A bit-field cannot appear in a map clause.
//
if (FD->isBitField())
return;
ImplicitMap.emplace_back(E);
return;
}
auto ELoc = E->getExprLoc();
// OpenMP [2.9.3.6, Restrictions, p.2]
// A list item that appears in a reduction clause of the innermost
// enclosing worksharing or parallel construct may not be accessed in
// an explicit task.
DVar = Stack->hasInnermostDSA(
FD, [](OpenMPClauseKind C) -> bool { return C == OMPC_reduction; },
[](OpenMPDirectiveKind K) -> bool {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
/*FromParent=*/true);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
ReportOriginalDSA(SemaRef, Stack, FD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(FD, false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(FD).first)
ImplicitFirstprivate.push_back(E);
return;
}
if (isOpenMPTargetExecutionDirective(DKind)) {
OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
if (!CheckMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
/*NoDiagnose=*/true))
return;
auto *VD = cast<ValueDecl>(
CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
[&CurComponents](
OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
auto CCI = CurComponents.rbegin();
auto CCE = CurComponents.rend();
for (const auto &SC : llvm::reverse(StackComponents)) {
// Do both expressions have the same kind?
if (CCI->getAssociatedExpression()->getStmtClass() !=
SC.getAssociatedExpression()->getStmtClass())
if (!(isa<OMPArraySectionExpr>(
SC.getAssociatedExpression()) &&
isa<ArraySubscriptExpr>(
CCI->getAssociatedExpression())))
return false;
Decl *CCD = CCI->getAssociatedDeclaration();
Decl *SCD = SC.getAssociatedDeclaration();
CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
if (SCD != CCD)
return false;
std::advance(CCI, 1);
if (CCI == CCE)
break;
}
return true;
})) {
Visit(E->getBase());
}
} else
Visit(E->getBase());
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (auto *C : S->clauses()) {
// Skip analysis of the arguments of the implicitly defined firstprivate
// clause for task|target directives and of the implicitly defined map
// clause for target directives.
if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
C->isImplicit())) {
for (auto *CC : C->children()) {
if (CC)
Visit(CC);
}
}
}
}
void VisitStmt(Stmt *S) {
for (auto *C : S->children()) {
if (C && !isa<OMPExecutableDirective>(C))
Visit(C);
}
}
bool isErrorFound() { return ErrorFound; }
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
llvm::DenseMap<ValueDecl *, Expr *> &getVarsWithInheritedDSA() {
return VarsWithInheritedDSA;
}
DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
: Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
};
} // namespace
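// Opens the captured region(s) required by the given directive before its
// associated statement is parsed. Combined directives open several nested
// regions; for example, the target cases below start one region for 'target'
// and a nested one for 'teams' or 'parallel'.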
void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
switch (DKind) {
case OMPD_parallel:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_target_teams:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTarget);
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams' or 'parallel'. Both regions have
// the same implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTeamsOrParallel);
break;
}
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_ordered:
case OMPD_atomic:
case OMPD_target_data:
case OMPD_target:
case OMPD_target_simd: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_task: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
std::make_pair(".privates.", Context.VoidPtrTy.withConst()),
std::make_pair(".copy_fn.",
Context.getPointerType(CopyFnType).withConst()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
break;
}
case OMPD_taskloop:
case OMPD_taskloop_simd: {
QualType KmpInt32Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
QualType KmpUInt64Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
QualType KmpInt64Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
std::make_pair(".privates.",
Context.VoidPtrTy.withConst().withRestrict()),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(".lb.", KmpUInt64Ty),
std::make_pair(".ub.", KmpUInt64Ty), std::make_pair(".st.", KmpInt64Ty),
std::make_pair(".liter.", KmpInt32Ty),
std::make_pair(".reductions.",
Context.VoidPtrTy.withConst().withRestrict()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
break;
}
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType()),
std::make_pair(".previous.ub.", Context.getSizeType()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType ParamsTeams[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams'.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTeams);
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType()),
std::make_pair(".previous.ub.", Context.getSizeType()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'parallel', adding the previous lower and
// upper bound parameters of the enclosing 'distribute' loop.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsParallel);
break;
}
case OMPD_target_update:
case OMPD_target_enter_data:
case OMPD_target_exit_data: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
std::make_pair(".privates.", Context.VoidPtrTy.withConst()),
std::make_pair(".copy_fn.",
Context.getPointerType(CopyFnType).withConst()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
break;
}
case OMPD_threadprivate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
case OMPD_declare_reduction:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
}
}
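// Returns the number of nested captured regions the directive requires, i.e.
// how many regions must be closed (or unwound on error) when the directive
// ends; see ActOnOpenMPRegionEnd and CaptureRegionUnwinderRAII below.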
int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, DKind);
return CaptureRegions.size();
}
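// Wraps CaptureExpr in an OMPCapturedExprDecl so that later clause processing
// can refer to the captured value through a plain DeclRefExpr. In C++ a
// glvalue is captured by lvalue reference; in C it is captured through a
// pointer to it (built with an implicit address-of).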
static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
Expr *CaptureExpr, bool WithInit,
bool AsExpression) {
assert(CaptureExpr);
ASTContext &C = S.getASTContext();
Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
QualType Ty = Init->getType();
if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
if (S.getLangOpts().CPlusPlus) {
Ty = C.getLValueReferenceType(Ty);
} else {
Ty = C.getPointerType(Ty);
ExprResult Res =
S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
if (!Res.isUsable())
return nullptr;
Init = Res.get();
}
WithInit = true;
}
auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
CaptureExpr->getLocStart());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C, SourceRange()));
S.CurContext->addHiddenDecl(CED);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
}
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit) {
OMPCapturedExprDecl *CD;
if (auto *VD = S.IsOpenMPCapturedDecl(D)) {
CD = cast<OMPCapturedExprDecl>(VD);
} else {
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
/*AsExpression=*/false);
}
return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
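// The overload below captures an arbitrary expression under the synthesized
// name '.capture_expr.'; for C glvalues captured through a pointer it
// re-applies the dereference before the captured value is used.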
static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
if (!Ref) {
OMPCapturedExprDecl *CD = buildCaptureDecl(
S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
/*WithInit=*/true, /*AsExpression=*/true);
Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
ExprResult Res = Ref;
if (!S.getLangOpts().CPlusPlus &&
CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
Ref->getType()->isPointerType()) {
Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
if (!Res.isUsable())
return ExprError();
}
return S.DefaultLvalueConversion(Res.get());
}
namespace {
// OpenMP directives parsed in this section are represented as a
// CapturedStatement with an associated statement. If a syntax error
// is detected during the parsing of the associated statement, the
// compiler must abort processing and close the CapturedStatement.
//
// Combined directives such as 'target parallel' have more than one
// nested CapturedStatement. This RAII ensures that we unwind out
// of all the nested CapturedStatements when an error is found.
class CaptureRegionUnwinderRAII {
private:
Sema &S;
bool &ErrorFound;
OpenMPDirectiveKind DKind;
public:
CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
OpenMPDirectiveKind DKind)
: S(S), ErrorFound(ErrorFound), DKind(DKind) {}
~CaptureRegionUnwinderRAII() {
if (ErrorFound) {
int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
while (--ThisCaptureLevel >= 0)
S.ActOnCapturedRegionError();
}
}
};
} // namespace
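// Finalizes the captured regions opened by ActOnOpenMPRegionStart, innermost
// first. The clause checks below (for example, 'nonmonotonic' combined with
// 'ordered', or 'linear' combined with 'ordered(n)') may still mark the
// region as erroneous, in which case the RAII above unwinds every nested
// captured region.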
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
bool ErrorFound = false;
CaptureRegionUnwinderRAII CaptureRegionUnwinder(
*this, ErrorFound, DSAStack->getCurrentDirective());
if (!S.isUsable()) {
ErrorFound = true;
return StmtError();
}
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
OMPOrderedClause *OC = nullptr;
OMPScheduleClause *SC = nullptr;
SmallVector<OMPLinearClause *, 4> LCs;
SmallVector<OMPClauseWithPreInit *, 8> PICs;
// Process the clauses: mark the declarations they reference as used
// (required for proper codegen) and collect the schedule, ordered and linear
// clauses for the checks below.
for (auto *Clause : Clauses) {
if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
auto *IRC = cast<OMPInReductionClause>(Clause);
for (auto *E : IRC->taskgroup_descriptors())
if (E)
MarkDeclarationsReferencedInExpr(E);
}
if (isOpenMPPrivate(Clause->getClauseKind()) ||
Clause->getClauseKind() == OMPC_copyprivate ||
(getLangOpts().OpenMPUseTLS &&
getASTContext().getTargetInfo().isTLSSupported() &&
Clause->getClauseKind() == OMPC_copyin)) {
DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
// Mark all variables in private list clauses as used in the inner region.
for (auto *VarRef : Clause->children()) {
if (auto *E = cast_or_null<Expr>(VarRef)) {
MarkDeclarationsReferencedInExpr(E);
}
}
DSAStack->setForceVarCapturing(/*V=*/false);
} else if (CaptureRegions.size() > 1 ||
CaptureRegions.back() != OMPD_unknown) {
if (auto *C = OMPClauseWithPreInit::get(Clause))
PICs.push_back(C);
if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
if (auto *E = C->getPostUpdateExpr())
MarkDeclarationsReferencedInExpr(E);
}
}
if (Clause->getClauseKind() == OMPC_schedule)
SC = cast<OMPScheduleClause>(Clause);
else if (Clause->getClauseKind() == OMPC_ordered)
OC = cast<OMPOrderedClause>(Clause);
else if (Clause->getClauseKind() == OMPC_linear)
LCs.push_back(cast<OMPLinearClause>(Clause));
}
// OpenMP, 2.7.1 Loop Construct, Restrictions
// The nonmonotonic modifier cannot be specified if an ordered clause is
// specified.
if (SC &&
(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
SC->getSecondScheduleModifier() ==
OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
OC) {
Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
? SC->getFirstScheduleModifierLoc()
: SC->getSecondScheduleModifierLoc(),
diag::err_omp_schedule_nonmonotonic_ordered)
<< SourceRange(OC->getLocStart(), OC->getLocEnd());
ErrorFound = true;
}
if (!LCs.empty() && OC && OC->getNumForLoops()) {
for (auto *C : LCs) {
Diag(C->getLocStart(), diag::err_omp_linear_ordered)
<< SourceRange(OC->getLocStart(), OC->getLocEnd());
}
ErrorFound = true;
}
if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
OC->getNumForLoops()) {
Diag(OC->getLocStart(), diag::err_omp_ordered_simd)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
ErrorFound = true;
}
if (ErrorFound) {
return StmtError();
}
StmtResult SR = S;
for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
// Mark all variables in private list clauses as used in the inner region.
// This is required for proper codegen of combined directives.
// TODO: add processing for other clauses.
if (ThisCaptureRegion != OMPD_unknown) {
for (auto *C : PICs) {
OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
// Find the particular capture region for the clause if the
// directive is a combined one with multiple capture regions.
// If the directive is not a combined one, the capture region
// associated with the clause is OMPD_unknown and is generated
// only once.
if (CaptureRegion == ThisCaptureRegion ||
CaptureRegion == OMPD_unknown) {
if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
for (auto *D : DS->decls())
MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
}
}
}
}
SR = ActOnCapturedRegionEnd(SR.get());
}
return SR;
}
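// Verifies the construct-type-clause of a 'cancel' or 'cancellation point'
// directive: only parallel, for, sections and taskgroup are accepted; any
// other region kind is diagnosed with err_omp_wrong_cancel_region.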
static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
OpenMPDirectiveKind CancelRegion,
SourceLocation StartLoc) {
// CancelRegion is only needed for cancel and cancellation_point.
if (CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_cancellation_point)
return false;
if (CancelRegion == OMPD_parallel || CancelRegion == OMPD_for ||
CancelRegion == OMPD_sections || CancelRegion == OMPD_taskgroup)
return false;
SemaRef.Diag(StartLoc, diag::err_omp_wrong_cancel_region)
<< getOpenMPDirectiveName(CancelRegion);
return true;
}
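// Checks whether CurrentRegion may be nested in the region on top of Stack,
// diagnosing the OpenMP nesting restrictions documented in the comments
// below (for example, an 'ordered' construct with the 'simd' clause is the
// only construct allowed inside a simd region).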
static bool checkNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
OpenMPDirectiveKind CurrentRegion,
const DeclarationNameInfo &CurrentName,
OpenMPDirectiveKind CancelRegion,
SourceLocation StartLoc) {
if (Stack->getCurScope()) {
auto ParentRegion = Stack->getParentDirective();
auto OffendingRegion = ParentRegion;
bool NestingProhibited = false;
bool CloseNesting = true;
bool OrphanSeen = false;
enum {
NoRecommend,
ShouldBeInParallelRegion,
ShouldBeInOrderedRegion,
ShouldBeInTargetRegion,
ShouldBeInTeamsRegion
} Recommend = NoRecommend;
if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
// OpenMP [2.16, Nesting of Regions]
// OpenMP constructs may not be nested inside a simd region.
// OpenMP [2.8.1,simd Construct, Restrictions]
// An ordered construct with the simd clause is the only OpenMP
// construct that can appear in the simd region.
// Allowing a SIMD construct nested in another SIMD construct is an