//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for OpenMP directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Stack of data-sharing attributes for variables
//===----------------------------------------------------------------------===//
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
OpenMPClauseKind CKind, bool NoDiagnose);
namespace {
/// Default data-sharing attributes, which can be applied to a directive.
enum DefaultDataSharingAttributes {
DSA_unspecified = 0, /// Data sharing attribute not specified.
DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
};
/// Attributes of the defaultmap clause.
enum DefaultMapAttributes {
DMA_unspecified, /// Default mapping is not specified.
DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
};
/// Stack for tracking declarations used in OpenMP directives and
/// clauses and their data-sharing attributes.
class DSAStackTy {
public:
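/// Data-sharing attribute information for a single variable: the directive
/// and clause it was found in, the reference expression, the private copy
/// (if any) and the location of the implicit DSA.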
struct DSAVarData {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
const Expr *RefExpr, DeclRefExpr *PrivateCopy,
SourceLocation ImplicitDSALoc)
: DKind(DKind), CKind(CKind), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
private:
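/// Data-sharing attribute and reference expression recorded for a single
/// declaration.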
struct DSAInfo {
OpenMPClauseKind Attributes = OMPC_unknown;
/// Pointer to a reference expression and a flag indicating whether the
/// variable is marked as lastprivate (true) or not (false).
llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
DeclRefExpr *PrivateCopy = nullptr;
};
using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
using LCDeclInfo = std::pair<unsigned, VarDecl *>;
using LoopControlVariablesMapTy =
llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
/// Struct that associates a component list with the clause kind where it was
/// found.
struct MappedExprComponentTy {
OMPClauseMappableExprCommon::MappableExprComponentLists Components;
OpenMPClauseKind Kind = OMPC_unknown;
};
using MappedExprComponentsTy =
llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
using CriticalsWithHintsTy =
llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
struct ReductionData {
using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
SourceRange ReductionRange;
llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
ReductionData() = default;
void set(BinaryOperatorKind BO, SourceRange RR) {
ReductionRange = RR;
ReductionOp = BO;
}
void set(const Expr *RefExpr, SourceRange RR) {
ReductionRange = RR;
ReductionOp = RefExpr;
}
};
using DeclReductionMapTy =
llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
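/// Per-directive stack entry: data-sharing attributes, reductions, mapped
/// components, loop control variables and other state for one OpenMP region.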
struct SharingMapTy {
DeclSAMapTy SharingMap;
DeclReductionMapTy ReductionMap;
AlignedMapTy AlignedMap;
MappedExprComponentsTy MappedExprComponents;
LoopControlVariablesMapTy LCVMap;
DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
SourceLocation DefaultAttrLoc;
DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
SourceLocation DefaultMapAttrLoc;
OpenMPDirectiveKind Directive = OMPD_unknown;
DeclarationNameInfo DirectiveName;
Scope *CurScope = nullptr;
SourceLocation ConstructLoc;
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
DoacrossDependMapTy DoacrossDepends;
/// Information about the 'ordered' clause of the region, if any: the first
/// element is the optional argument of the clause, the second is the clause
/// itself.
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
unsigned AssociatedLoops = 1;
bool HasMutipleLoops = false;
const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
bool CancelRegion = false;
bool LoopStart = false;
bool BodyComplete = false;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
llvm::DenseSet<QualType> MappedClassesQualTypes;
/// List of globals marked as declare target link in this target region
/// (isOpenMPTargetExecutionDirective(Directive) == true).
llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
ConstructLoc(Loc) {}
SharingMapTy() = default;
};
using StackTy = SmallVector<SharingMapTy, 4>;
/// Data-sharing attribute information for declarations marked threadprivate.
DeclSAMapTy Threadprivates;
const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
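/// Stack of OpenMP regions, grouped by the enclosing non-capturing function
/// scope.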
SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
/// Kind of the clause currently being parsed. When not OMPC_unknown, we are
/// in clause parsing mode and checks for DSA are performed against the
/// parent directive rather than the current one.
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
/// true if all the variables in the target executable directives must be
/// captured by reference.
bool ForceCaptureByReferenceInTargetExecutable = false;
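/// Map of 'critical' directive names to the directive and its 'hint' value.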
CriticalsWithHintsTy Criticals;
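/// Number of innermost stack elements to ignore while operating on the
/// stack (see ParentDirectiveScope).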
unsigned IgnoredStackElements = 0;
/// Iterators over the stack iterate in order from innermost to outermost
/// directive.
using const_iterator = StackTy::const_reverse_iterator;
const_iterator begin() const {
return Stack.empty() ? const_iterator()
: Stack.back().first.rbegin() + IgnoredStackElements;
}
const_iterator end() const {
return Stack.empty() ? const_iterator() : Stack.back().first.rend();
}
using iterator = StackTy::reverse_iterator;
iterator begin() {
return Stack.empty() ? iterator()
: Stack.back().first.rbegin() + IgnoredStackElements;
}
iterator end() {
return Stack.empty() ? iterator() : Stack.back().first.rend();
}
// Convenience operations to get at the elements of the stack.
bool isStackEmpty() const {
return Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope ||
Stack.back().first.size() <= IgnoredStackElements;
}
size_t getStackSize() const {
return isStackEmpty() ? 0
: Stack.back().first.size() - IgnoredStackElements;
}
SharingMapTy *getTopOfStackOrNull() {
size_t Size = getStackSize();
if (Size == 0)
return nullptr;
return &Stack.back().first[Size - 1];
}
const SharingMapTy *getTopOfStackOrNull() const {
return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
}
SharingMapTy &getTopOfStack() {
assert(!isStackEmpty() && "no current directive");
return *getTopOfStackOrNull();
}
const SharingMapTy &getTopOfStack() const {
return const_cast<DSAStackTy&>(*this).getTopOfStack();
}
SharingMapTy *getSecondOnStackOrNull() {
size_t Size = getStackSize();
if (Size <= 1)
return nullptr;
return &Stack.back().first[Size - 2];
}
const SharingMapTy *getSecondOnStackOrNull() const {
return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
}
/// Get the stack element at a certain level (previously returned by
/// \c getNestingLevel).
///
/// Note that nesting levels count from outermost to innermost, and this is
/// the reverse of our iteration order where new inner levels are pushed at
/// the front of the stack.
SharingMapTy &getStackElemAtLevel(unsigned Level) {
assert(Level < getStackSize() && "no such stack element");
return Stack.back().first[Level];
}
const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
}
DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
/// Checks if the variable is local to the OpenMP region.
bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
/// Vector of previously declared 'requires' directives.
SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
/// omp_allocator_handle_t type.
QualType OMPAllocatorHandleT;
/// Expression for the predefined allocators.
Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
nullptr};
/// Locations of previously encountered 'target' directives.
SmallVector<SourceLocation, 2> TargetLocations;
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
/// Sets omp_allocator_handle_t type.
void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
/// Gets omp_allocator_handle_t type.
QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
/// Sets the given default allocator.
void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
Expr *Allocator) {
OMPPredefinedAllocators[AllocatorKind] = Allocator;
}
/// Returns the specified default allocator.
Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
return OMPPredefinedAllocators[AllocatorKind];
}
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
assert(isClauseParsingMode() && "Must be in clause parsing mode.");
return ClauseKindMode;
}
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
bool isBodyComplete() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top && Top->BodyComplete;
}
void setBodyComplete() {
getTopOfStack().BodyComplete = true;
}
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
void setForceCaptureByReferenceInTargetExecutable(bool V) {
ForceCaptureByReferenceInTargetExecutable = V;
}
bool isForceCaptureByReferenceInTargetExecutable() const {
return ForceCaptureByReferenceInTargetExecutable;
}
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
assert(!IgnoredStackElements &&
"cannot change stack while ignoring elements");
if (Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope)
Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
Stack.back().first.back().DefaultAttrLoc = Loc;
}
void pop() {
assert(!IgnoredStackElements &&
"cannot change stack while ignoring elements");
assert(!Stack.back().first.empty() &&
"Data-sharing attributes stack is empty!");
Stack.back().first.pop_back();
}
/// RAII object to temporarily leave the scope of a directive when we want to
/// logically operate in its parent.
class ParentDirectiveScope {
DSAStackTy &Self;
bool Active;
public:
ParentDirectiveScope(DSAStackTy &Self, bool Activate)
: Self(Self), Active(false) {
if (Activate)
enable();
}
~ParentDirectiveScope() { disable(); }
void disable() {
if (Active) {
--Self.IgnoredStackElements;
Active = false;
}
}
void enable() {
if (!Active) {
++Self.IgnoredStackElements;
Active = true;
}
}
};
/// Marks that we have started parsing the loop.
void loopInit() {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
getTopOfStack().LoopStart = true;
}
/// Start capturing of the variables in the loop context.
void loopStart() {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
getTopOfStack().LoopStart = false;
}
/// Returns true if variables are captured, false otherwise.
bool isLoopStarted() const {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
return !getTopOfStack().LoopStart;
}
/// Marks (or clears) a declaration as a possible loop counter.
void resetPossibleLoopCounter(const Decl *D = nullptr) {
getTopOfStack().PossiblyLoopCounter =
D ? D->getCanonicalDecl() : D;
}
/// Gets the possible loop counter decl.
const Decl *getPossiblyLoopCunter() const {
return getTopOfStack().PossiblyLoopCounter;
}
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
assert(!IgnoredStackElements &&
"cannot change stack while ignoring elements");
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
assert(!isa<CapturingScopeInfo>(CurFnScope));
CurrentNonCapturingFunctionScope = CurFnScope;
}
/// Pop region stack for non-capturing function.
void popFunction(const FunctionScopeInfo *OldFSI) {
assert(!IgnoredStackElements &&
"cannot change stack while ignoring elements");
if (!Stack.empty() && Stack.back().second == OldFSI) {
assert(Stack.back().first.empty());
Stack.pop_back();
}
CurrentNonCapturingFunctionScope = nullptr;
for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
if (!isa<CapturingScopeInfo>(FSI)) {
CurrentNonCapturingFunctionScope = FSI;
break;
}
}
}
void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
}
const std::pair<const OMPCriticalDirective *, llvm::APSInt>
getCriticalWithHint(const DeclarationNameInfo &Name) const {
auto I = Criticals.find(Name.getAsString());
if (I != Criticals.end())
return I->second;
return std::make_pair(nullptr, llvm::APSInt());
}
/// If 'aligned' declaration for given variable \a D was not seen yet,
/// add it and return NULL; otherwise return previous occurrence's expression
/// for diagnostics.
const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
/// Register specified variable as loop control variable.
void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
/// Check if the specified variable is a loop control variable for
/// current region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
/// Check if the specified variable is a loop control variable for
/// parent region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
/// Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
const ValueDecl *getParentLoopControlVariable(unsigned I) const;
/// Adds explicit data sharing attribute to the specified declaration.
void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy = nullptr);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK);
/// Adds additional information for the reduction items with the reduction id
/// represented as reduction identifier.
void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef);
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
const DSAVarData
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor) const;
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
const DSAVarData
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const;
/// Return reduction reference expression for the current taskgroup.
Expr *getTaskgroupReductionRef() const {
assert(getTopOfStack().Directive == OMPD_taskgroup &&
"taskgroup reference expression requested for non taskgroup "
"directive.");
return getTopOfStack().TaskgroupReductionRef;
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
/// descriptor variable at the \p Level of OpenMP regions.
bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
return getStackElemAtLevel(Level).TaskgroupReductionRef &&
cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
->getDecl() == VD;
}
/// Returns data sharing attributes from top of the stack for the
/// specified declaration.
const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
/// Returns data-sharing attributes for the specified declaration.
const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
/// Checks if the specified variable has data-sharing attributes that match
/// the \a CPred predicate in any directive that matches the \a DPred
/// predicate.
const DSAVarData
hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variable has data-sharing attributes that match
/// the \a CPred predicate in any innermost directive that matches the
/// \a DPred predicate.
const DSAVarData
hasInnermostDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variable has explicit data-sharing attributes
/// that match the \a CPred predicate at the specified OpenMP region.
bool hasExplicitDSA(const ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
unsigned Level, bool NotLastprivate = false) const;
/// Returns true if the directive at level \a Level matches the specified
/// \a DPred predicate.
bool hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
unsigned Level) const;
/// Finds a directive that matches the specified \a DPred predicate.
bool hasDirective(
const llvm::function_ref<bool(
OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
DPred,
bool FromParent) const;
/// Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->Directive : OMPD_unknown;
}
/// Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
return getStackElemAtLevel(Level).Directive;
}
/// Returns the capture region at the specified level.
OpenMPDirectiveKind getCaptureRegion(unsigned Level,
unsigned OpenMPCaptureLevel) const {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirective(Level));
return CaptureRegions[OpenMPCaptureLevel];
}
/// Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
const SharingMapTy *Parent = getSecondOnStackOrNull();
return Parent ? Parent->Directive : OMPD_unknown;
}
/// Add a 'requires' declaration to the internal vector.
void addRequiresDecl(OMPRequiresDecl *RD) {
RequiresDecls.push_back(RD);
}
/// Checks if any previously declared 'requires' directive has the specified
/// type of clause.
template <typename ClauseType>
bool hasRequiresDeclWithClause() {
return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
return isa<ClauseType>(C);
});
});
}
/// Checks for a duplicate clause among previously declared 'requires'
/// directives.
bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
bool IsDuplicate = false;
for (OMPClause *CNew : ClauseList) {
for (const OMPRequiresDecl *D : RequiresDecls) {
for (const OMPClause *CPrev : D->clauselists()) {
if (CNew->getClauseKind() == CPrev->getClauseKind()) {
SemaRef.Diag(CNew->getBeginLoc(),
diag::err_omp_requires_clause_redeclaration)
<< getOpenMPClauseName(CNew->getClauseKind());
SemaRef.Diag(CPrev->getBeginLoc(),
diag::note_omp_requires_previous_clause)
<< getOpenMPClauseName(CPrev->getClauseKind());
IsDuplicate = true;
}
}
}
}
return IsDuplicate;
}
/// Add the location of a previously encountered 'target' directive to the
/// internal vector.
void addTargetDirLocation(SourceLocation LocStart) {
TargetLocations.push_back(LocStart);
}
// Return previously encountered target region locations.
ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
return TargetLocations;
}
/// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
getTopOfStack().DefaultAttr = DSA_none;
getTopOfStack().DefaultAttrLoc = Loc;
}
/// Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
getTopOfStack().DefaultAttr = DSA_shared;
getTopOfStack().DefaultAttrLoc = Loc;
}
/// Set default data mapping attribute to 'tofrom:scalar'.
void setDefaultDMAToFromScalar(SourceLocation Loc) {
getTopOfStack().DefaultMapAttr = DMA_tofrom_scalar;
getTopOfStack().DefaultMapAttrLoc = Loc;
}
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
: getTopOfStack().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
return isStackEmpty() ? SourceLocation()
: getTopOfStack().DefaultAttrLoc;
}
DefaultMapAttributes getDefaultDMA() const {
return isStackEmpty() ? DMA_unspecified
: getTopOfStack().DefaultMapAttr;
}
DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
return getStackElemAtLevel(Level).DefaultMapAttr;
}
SourceLocation getDefaultDMALocation() const {
return isStackEmpty() ? SourceLocation()
: getTopOfStack().DefaultMapAttrLoc;
}
/// Checks if the specified variable is threadprivate.
bool isThreadPrivate(VarDecl *D) {
const DSAVarData DVar = getTopDSA(D, false);
return isOpenMPThreadPrivate(DVar.CKind);
}
/// Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, const Expr *Param,
OMPOrderedClause *Clause) {
if (IsOrdered)
getTopOfStack().OrderedRegion.emplace(Param, Clause);
else
getTopOfStack().OrderedRegion.reset();
}
/// Returns true if the region is ordered (has an associated 'ordered'
/// clause), false otherwise.
bool isOrderedRegion() const {
if (const SharingMapTy *Top = getTopOfStackOrNull())
return Top->OrderedRegion.hasValue();
return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
if (const SharingMapTy *Top = getTopOfStackOrNull())
if (Top->OrderedRegion.hasValue())
return Top->OrderedRegion.getValue();
return std::make_pair(nullptr, nullptr);
}
/// Returns true if the parent region is ordered (has an associated
/// 'ordered' clause), false otherwise.
bool isParentOrderedRegion() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
return Parent->OrderedRegion.hasValue();
return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
if (Parent->OrderedRegion.hasValue())
return Parent->OrderedRegion.getValue();
return std::make_pair(nullptr, nullptr);
}
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
getTopOfStack().NowaitRegion = IsNowait;
}
/// Returns true if the parent region is nowait (has an associated 'nowait'
/// clause), false otherwise.
bool isParentNowaitRegion() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
return Parent->NowaitRegion;
return false;
}
/// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (SharingMapTy *Parent = getSecondOnStackOrNull())
Parent->CancelRegion |= Cancel;
}
/// Return true if current region has inner cancel construct.
bool isCancelRegion() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->CancelRegion : false;
}
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
getTopOfStack().AssociatedLoops = Val;
if (Val > 1)
getTopOfStack().HasMutipleLoops = true;
}
/// Return collapse value for region.
unsigned getAssociatedLoops() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->AssociatedLoops : 0;
}
/// Returns true if the construct is associated with multiple loops.
bool hasMutipleLoops() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->HasMutipleLoops : false;
}
/// Marks current target region as one with closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
if (SharingMapTy *Parent = getSecondOnStackOrNull())
Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
}
/// Returns true if the current region has a closely nested teams region.
bool hasInnerTeamsRegion() const {
return getInnerTeamsRegionLoc().isValid();
}
/// Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
}
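/// Returns the scope associated with the current construct, if any.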
Scope *getCurScope() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->CurScope : nullptr;
}
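/// Returns the source location of the current construct, if any.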
SourceLocation getConstructLoc() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->ConstructLoc : SourceLocation();
}
/// Apply the check specified in \a Check to all component lists and return
/// true if any issue is found.
bool checkMappableExprComponentListsForDecl(
const ValueDecl *VD, bool CurrentRegionOnly,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)>
Check) const {
if (isStackEmpty())
return false;
auto SI = begin();
auto SE = end();
if (SI == SE)
return false;
if (CurrentRegionOnly)
SE = std::next(SI);
else
std::advance(SI, 1);
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
}
return false;
}
/// Apply the check specified in \a Check to all component lists at the given
/// level and return true if any issue is found.
bool checkMappableExprComponentListsForDeclAtLevel(
const ValueDecl *VD, unsigned Level,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)>
Check) const {
if (getStackSize() <= Level)
return false;
const SharingMapTy &StackElem = getStackElemAtLevel(Level);
auto MI = StackElem.MappedExprComponents.find(VD);
if (MI != StackElem.MappedExprComponents.end())
for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
return false;
}
/// Create a new mappable expression component list associated with a given
/// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
const ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
MEC.Kind = WhereFoundClauseKind;
}
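/// Returns the nesting level of the current (innermost) directive.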
unsigned getNestingLevel() const {
assert(!isStackEmpty());
return getStackSize() - 1;
}
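/// Registers a doacross 'depend' clause, with its operator/offset pairs, on
/// the parent worksharing directive.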
void addDoacrossDependClause(OMPDependClause *C,
const OperatorOffsetTy &OpsOffs) {
SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
Parent->DoacrossDepends.try_emplace(C, OpsOffs);
}
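/// Returns the doacross 'depend' clauses recorded for the current
/// worksharing directive, or an empty range otherwise.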
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
const SharingMapTy &StackElem = getTopOfStack();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
StackElem.DoacrossDepends.end());
}
// Store the type of a class that has been explicitly mapped.
void addMappedClassesQualTypes(QualType QT) {
SharingMapTy &StackElem = getTopOfStack();
StackElem.MappedClassesQualTypes.insert(QT);
}
// Return true if the given class type has been explicitly mapped before.
bool isClassPreviouslyMapped(QualType QT) const {
const SharingMapTy &StackElem = getTopOfStack();
return StackElem.MappedClassesQualTypes.count(QT) != 0;
}
/// Adds global declare target to the parent target region.
void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
"Expected declare target link global.");
for (auto &Elem : *this) {
if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
Elem.DeclareTargetLinkVarDecls.push_back(E);
return;
}
}
}
/// Returns the list of globals marked as 'declare target link' if the
/// current directive is a target directive.
ArrayRef<DeclRefExpr *> getLinkGlobals() const {
assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
"Expected target executable directive.");
return getTopOfStack().DeclareTargetLinkVarDecls;
}
};
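/// Returns true if the directive creates an implicit tasking region, i.e. a
/// parallel or teams region.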
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
}
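/// Returns true if the directive is an implicit or explicit tasking region,
/// or is unknown.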
bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
DKind == OMPD_unknown;
}
} // namespace
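/// Peels off full-expressions, materialized temporaries, temporary binders,
/// implicit casts and parentheses to recover the expression as written.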
static const Expr *getExprAsWritten(const Expr *E) {
if (const auto *FE = dyn_cast<FullExpr>(E))
E = FE->getSubExpr();
if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
E = Binder->getSubExpr();
if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
return E->IgnoreParens();
}
static Expr *getExprAsWritten(Expr *E) {
return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
}
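/// Returns the canonical declaration for \p D; for captured expressions that
/// wrap a member expression, returns the canonical member declaration.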
static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
D = ME->getMemberDecl();
const auto *VD = dyn_cast<VarDecl>(D);
const auto *FD = dyn_cast<FieldDecl>(D);
if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
} else {
assert(FD);
FD = FD->getCanonicalDecl();
D = FD;
}
return D;
}
static ValueDecl *getCanonicalDecl(ValueDecl *D) {
return const_cast<ValueDecl *>(
getCanonicalDecl(const_cast<const ValueDecl *>(D)));
}
DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
ValueDecl *D) const {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
const auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
if (Iter == end()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// File-scope or namespace-scope variables referenced in called routines
// in the region are shared unless they appear in a threadprivate
// directive.
if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
DVar.CKind = OMPC_shared;
// OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// Variables with static storage duration that are declared in called
// routines in the region are shared.
if (VD && VD->hasGlobalStorage())
DVar.CKind = OMPC_shared;
// Non-static data members are shared by default.
if (FD)
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables with automatic storage duration that are declared in a scope
// inside the construct are private.
if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
(VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
DVar.CKind = OMPC_private;
return DVar;
}
DVar.DKind = Iter->Directive;
// Explicitly specified attributes and local variables with predetermined
// attributes.
if (Iter->SharingMap.count(D)) {
const DSAInfo &Data = Iter->SharingMap.lookup(D);
DVar.RefExpr = Data.RefExpr.getPointer();
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, implicitly determined, p.1]
// In a parallel or task construct, the data-sharing attributes of these
// variables are determined by the default clause, if present.
switch (Iter->DefaultAttr) {
case DSA_shared:
DVar.CKind = OMPC_shared;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
case DSA_none:
return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
// In a parallel construct, if no default clause is present, these
// variables are shared.
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
if ((isOpenMPParallelDirective(DVar.DKind) &&
!isOpenMPTaskLoopDirective(DVar.DKind)) ||
isOpenMPTeamsDirective(DVar.DKind)) {
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.4]
// In a task construct, if no default clause is present, a variable that in
// the enclosing context is determined to be shared by all implicit tasks
// bound to the current team is shared.
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
const_iterator I = Iter, E = end();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
// Referenced in a Construct, implicitly determined, p.6]
// In a task construct, if no default clause is present, a variable
// whose data-sharing attribute is not determined by the rules above is
// firstprivate.
DVarTemp = getDSA(I, D);
if (DVarTemp.CKind != OMPC_shared) {
DVar.RefExpr = nullptr;
DVar.CKind = OMPC_firstprivate;
return DVar;
}
} while (I != E && !isImplicitTaskingRegion(I->Directive));
DVar.CKind =
(DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
return DVar;
}
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.3]
// For constructs other than task, if no default clause is present, these
// variables inherit their data-sharing attributes from the enclosing
// context.
return getDSA(++Iter, D);
}
const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
const Expr *NewDE) {
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
SharingMapTy &StackElem = getTopOfStack();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
StackElem.AlignedMap[D] = NewDE;
return nullptr;
}
assert(It->second && "Unexpected nullptr expr in the aligned map");
return It->second;
}
void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
SharingMapTy &StackElem = getTopOfStack();
StackElem.LCVMap.try_emplace(
D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
}
const DSAStackTy::LCDeclInfo
DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
const SharingMapTy &StackElem = getTopOfStack();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
const DSAStackTy::LCDeclInfo
DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
const SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto It = Parent->LCVMap.find(D);
if (It != Parent->LCVMap.end())
return It->second;
return {0, nullptr};
}
const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
const SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && "Data-sharing attributes stack is empty");
if (Parent->LCVMap.size() < I)
return nullptr;
for (const auto &Pair : Parent->LCVMap)
if (Pair.second.first == I)
return Pair.first;
return nullptr;
}
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
DSAInfo &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
DSAInfo &Data = getTopOfStack().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
(isLoopControlVariable(D).first && A == OMPC_private));
if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
Data.RefExpr.setInt(/*IntVal=*/true);
return;
}
const bool IsLastprivate =
A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
}
}
}
/// Build a variable declaration for an OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
StringRef Name, const AttrVec *Attrs = nullptr,
DeclRefExpr *OrigRef = nullptr) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
auto *Decl =
VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
if (Attrs) {
for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
I != E; ++I)
Decl->addAttr(*I);
}
Decl->setImplicit();
if (OrigRef) {
Decl->addAttr(
OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
}
return Decl;
}
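/// Build a reference to the given variable, marking it as referenced and
/// used.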
static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
SourceLocation Loc,
bool RefersToCapture = false) {
D->setReferenced();
D->markUsed(S.Context);
return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
SourceLocation(), D, RefersToCapture, Loc, Ty,
VK_LValue);
}
void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
getTopOfStack().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
Expr *&TaskgroupReductionRef =
getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
getTopOfStack().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
Expr *&TaskgroupReductionRef =
getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
!ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
D = D->getCanonicalDecl();
for (const_iterator E = end(); I != E; ++I) {
if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
isOpenMPTargetExecutionDirective(I->Directive)) {
Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
Scope *CurScope = getCurScope();
while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
CurScope = CurScope->getParent();
return CurScope != TopScope;
}
}
return false;
}
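/// Returns true if \p Type is const-qualified and, in C++, has no mutable
/// fields; optionally reports whether it is a class type.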
static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
bool AcceptIfMutable = true,
bool *IsClassType = nullptr) {
ASTContext &Context = SemaRef.getASTContext();
Type = Type.getNonReferenceType().getCanonicalType();
bool IsConstant = Type.isConstant(Context);
Type = Context.getBaseElementType(Type);
const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
? Type->getAsCXXRecordDecl()
: nullptr;
if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
RD = CTD->getTemplatedDecl();
if (IsClassType)
*IsClassType = RD;
return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
RD->hasDefinition() && RD->hasMutableFields());
}
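/// Diagnoses use of a const-qualified (and non-mutable) variable or list
/// item in a clause \p CKind that requires a modifiable entity; returns true
/// if a diagnostic was emitted.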
static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
QualType Type, OpenMPClauseKind CKind,
SourceLocation ELoc,
bool AcceptIfMutable = true,
bool ListItemNotVar = false) {
ASTContext &Context = SemaRef.getASTContext();
bool IsClassType;
if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
unsigned Diag = ListItemNotVar
? diag::err_omp_const_list_item
: IsClassType ? diag::err_omp_const_not_mutable_variable
: diag::err_omp_const_variable;
SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
if (!ListItemNotVar && D) {
const VarDecl *VD = dyn_cast<VarDecl>(D);
bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
VarDecl::DeclarationOnly;
SemaRef.Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
}
return true;
}
return false;
}
const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
bool FromParent) {
D = getCanonicalDecl(D);
DSAVarData DVar;
auto *VD = dyn_cast<VarDecl>(D);
auto TI = Threadprivates.find(D);
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
}
if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
DVar.RefExpr = buildDeclRefExpr(
SemaRef, VD, D->getType().getNonReferenceType(),
VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables appearing in threadprivate directives are threadprivate.
if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
SemaRef.getLangOpts().OpenMPUseTLS &&
SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
(VD && VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
DVar.RefExpr = buildDeclRefExpr(
SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
return DVar;
}
if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
!isLoopControlVariable(D).first) {
const_iterator IterTarget =
std::find_if(begin(), end(), [](const SharingMapTy &Data) {
return isOpenMPTargetExecutionDirective(Data.Directive);
});
if (IterTarget != end()) {
const_iterator ParentIterTarget = IterTarget + 1;
for (const_iterator Iter = begin();
Iter != ParentIterTarget; ++Iter) {
if (isOpenMPLocal(VD, Iter)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
D->getLocation());
DVar.CKind = OMPC_threadprivate;
return DVar;
}
}
if (!isClauseParsingMode() || IterTarget != begin()) {
auto DSAIter = IterTarget->SharingMap.find(D);
if (DSAIter != IterTarget->SharingMap.end() &&
isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
}
const_iterator End = end();
if (!SemaRef.isOpenMPCapturedByRef(
D, std::distance(ParentIterTarget, End),
/*OpenMPCaptureLevel=*/0)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
IterTarget->ConstructLoc);
DVar.CKind = OMPC_threadprivate;
return DVar;
}
}
}
}
if (isStackEmpty())
// Not in OpenMP execution region and top scope was already checked.
return DVar;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.4]
// Static data members are shared.
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.7]
// Variables with static storage duration that are declared in a scope
// inside the construct are shared.
if (VD && VD->isStaticDataMember()) {
// Check for explicitly specified attributes.
const_iterator I = begin();
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
DVar.RefExpr = Data.RefExpr.getPointer();
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
return DVar;
}
DVar.CKind = OMPC_shared;
return DVar;
}
auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
// The predetermined shared attribute for const-qualified types having no
// mutable members was removed after OpenMP 3.1.
if (SemaRef.LangOpts.OpenMP <= 31) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.6]
// Variables with const qualified type having no mutable member are
// shared.
if (isConstNotMutableType(SemaRef, D->getType())) {
// Variables with const-qualified type having no mutable member may be
// listed in a firstprivate clause, even if they are static data members.
DSAVarData DVarTemp = hasInnermostDSA(
D,
[](OpenMPClauseKind C) {
return C == OMPC_firstprivate || C == OMPC_shared;
},
MatchesAlways, FromParent);
if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
return DVarTemp;
DVar.CKind = OMPC_shared;
return DVar;
}
}
// Explicitly specified attributes and local variables with predetermined
// attributes.
const_iterator I = begin();
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
DVar.RefExpr = Data.RefExpr.getPointer();
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
}
return DVar;
}
const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
bool FromParent) const {
if (isStackEmpty()) {
const_iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
const_iterator StartI = begin();
const_iterator EndI = end();
if (FromParent && StartI != EndI)
++StartI;
return getDSA(StartI, D);
}
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
const_iterator I = begin();
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
for (; I != EndI; ++I) {
if (!DPred(I->Directive) &&
!isImplicitOrExplicitTaskingRegion(I->Directive))
continue;
const_iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
if (I == NewI && CPred(DVar.CKind))
return DVar;
}
return {};
}
const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
const_iterator StartI = begin();
const_iterator EndI = end();
if (FromParent && StartI != EndI)
++StartI;
if (StartI == EndI || !DPred(StartI->Directive))
return {};
const_iterator NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
unsigned Level, bool NotLastprivate) const {
if (getStackSize() <= Level)
return false;
D = getCanonicalDecl(D);
const SharingMapTy &StackElem = getStackElemAtLevel(Level);
auto I = StackElem.SharingMap.find(D);
if (I != StackElem.SharingMap.end() &&
I->getSecond().RefExpr.getPointer() &&
CPred(I->getSecond().Attributes) &&
(!NotLastprivate || !I->getSecond().RefExpr.getInt()))
return true;
// Check predetermined rules for the loop control variables.
auto LI = StackElem.LCVMap.find(D);
if (LI != StackElem.LCVMap.end())
return CPred(OMPC_private);
return false;
}
bool DSAStackTy::hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
unsigned Level) const {
if (getStackSize() <= Level)
return false;
const SharingMapTy &StackElem = getStackElemAtLevel(Level);
return DPred(StackElem.Directive);
}
bool DSAStackTy::hasDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind,
const DeclarationNameInfo &, SourceLocation)>
DPred,
bool FromParent) const {
// We look only in the enclosing region.
size_t Skip = FromParent ? 2 : 1;
for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
I != E; ++I) {
if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
return true;
}
return false;
}
void Sema::InitDataSharingAttributesStack() {
VarDataSharingAttributesStack = new DSAStackTy(*this);
}
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
void Sema::pushOpenMPFunctionRegion() {
DSAStack->pushFunction();
}
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
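/// Returns true if diagnostics for OpenMP device code may be deferred, i.e.
/// we are not inside a target execution directive or a declare target
/// context.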
static bool isOpenMPDeviceDelayedContext(Sema &S) {
assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
return !S.isInOpenMPTargetExecutionDirective() &&
!S.isInOpenMPDeclareTargetContext();
}
namespace {
/// Status of the function emission on the host/device.
enum class FunctionEmissionStatus {
Emitted,
Discarded,
Unknown,
};
} // anonymous namespace
Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
switch (FES) {
case FunctionEmissionStatus::Emitted:
Kind = DeviceDiagBuilder::K_Immediate;
break;
case FunctionEmissionStatus::Unknown:
Kind = isOpenMPDeviceDelayedContext(*this) ? DeviceDiagBuilder::K_Deferred
: DeviceDiagBuilder::K_Immediate;
break;
case FunctionEmissionStatus::TemplateDiscarded:
case FunctionEmissionStatus::OMPDiscarded:
Kind = DeviceDiagBuilder::K_Nop;
break;
case FunctionEmissionStatus::CUDADiscarded:
llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
break;
}
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID) {
assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
"Expected OpenMP host compilation.");
FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
switch (FES) {
case FunctionEmissionStatus::Emitted:
Kind = DeviceDiagBuilder::K_Immediate;
break;
case FunctionEmissionStatus::Unknown:
Kind = DeviceDiagBuilder::K_Deferred;
break;
case FunctionEmissionStatus::TemplateDiscarded:
case FunctionEmissionStatus::OMPDiscarded:
case FunctionEmissionStatus::CUDADiscarded:
Kind = DeviceDiagBuilder::K_Nop;
break;
}
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
assert(Callee && "Callee may not be null.");
Callee = Callee->getMostRecentDecl();
FunctionDecl *Caller = getCurFunctionDecl();
// Host-only functions are not available on the device.
if (Caller) {
FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
assert(CallerS != FunctionEmissionStatus::CUDADiscarded &&
CalleeS != FunctionEmissionStatus::CUDADiscarded &&
"CUDADiscarded unexpected in OpenMP device function check");
if ((CallerS == FunctionEmissionStatus::Emitted ||
(!isOpenMPDeviceDelayedContext(*this) &&
CallerS == FunctionEmissionStatus::Unknown)) &&
CalleeS == FunctionEmissionStatus::OMPDiscarded) {
StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_host);
Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< HostDevTy;
return;
}
}
// If the caller is known-emitted, mark the callee as known-emitted.
// Otherwise, mark the call in our call graph so we can traverse it later.
if ((CheckForDelayedContext && !isOpenMPDeviceDelayedContext(*this)) ||
(!Caller && !CheckForDelayedContext) ||
(Caller && getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
markKnownEmitted(*this, Caller, Callee, Loc,
[CheckForDelayedContext](Sema &S, FunctionDecl *FD) {
return CheckForDelayedContext &&
S.getEmissionStatus(FD) ==
FunctionEmissionStatus::Emitted;
});
else if (Caller)
DeviceCallGraph[Caller].insert({Callee, Loc});
}
void Sema::checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller) {
assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
"Expected OpenMP host compilation.");
assert(Callee && "Callee may not be null.");
Callee = Callee->getMostRecentDecl();
FunctionDecl *Caller = getCurFunctionDecl();
// Device-only functions are not available on the host.
if (Caller) {
FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
assert(
(LangOpts.CUDA || (CallerS != FunctionEmissionStatus::CUDADiscarded &&
CalleeS != FunctionEmissionStatus::CUDADiscarded)) &&
"CUDADiscarded unexpected in OpenMP host function check");
if (CallerS == FunctionEmissionStatus::Emitted &&
CalleeS == FunctionEmissionStatus::OMPDiscarded) {
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< NoHostDevTy;
return;
}
}
// If the caller is known-emitted, mark the callee as known-emitted.
// Otherwise, mark the call in our call graph so we can traverse it later.
if (!shouldIgnoreInHostDeviceCheck(Callee)) {
if ((!CheckCaller && !Caller) ||
(Caller &&
getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
markKnownEmitted(
*this, Caller, Callee, Loc, [CheckCaller](Sema &S, FunctionDecl *FD) {
return CheckCaller &&
S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
});
else if (Caller)
DeviceCallGraph[Caller].insert({Callee, Loc});
}
}
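/// Diagnose expressions whose type cannot be represented on the device during
/// OpenMP device compilation, e.g. '_Float16', '__float128', or 128-bit
/// integers on offloading targets that do not provide these types. For
/// example (illustrative), using a '__float128' variable inside a 'target'
/// region compiled for such a device is diagnosed with
/// err_omp_unsupported_type.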
void Sema::checkOpenMPDeviceExpr(const Expr *E) {
assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
"OpenMP device compilation mode is expected.");
QualType Ty = E->getType();
if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
((Ty->isFloat128Type() ||
(Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
!Context.getTargetInfo().hasFloat128Type()) ||
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
!Context.getTargetInfo().hasInt128Type()))
targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
<< static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
<< Context.getTargetInfo().getTriple().str() << E->getSourceRange();
}
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
ASTContext &Ctx = getASTContext();
bool IsByRef = true;
// Find the directive that is associated with the provided scope.
D = cast<ValueDecl>(D->getCanonicalDecl());
QualType Ty = D->getType();
bool IsVariableUsedInMapClause = false;
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
// This table summarizes how a given variable should be passed to the device
// given its type and the clauses where it appears. This table is based on
// the description in OpenMP 4.5 [2.10.4, target Construct] and
// OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
//
// =========================================================================
// | type | defaultmap | pvt | first | is_device_ptr | map | res. |
// | |(tofrom:scalar)| | pvt | | | |
// =========================================================================
// | scl | | | | - | | bycopy|
// | scl | | - | x | - | - | bycopy|
// | scl | | x | - | - | - | null |
// | scl | x | | | - | | byref |
// | scl | x | - | x | - | - | bycopy|
// | scl | x | x | - | - | - | null |
// | scl | | - | - | - | x | byref |
// | scl | x | - | - | - | x | byref |
//
// | agg | n.a. | | | - | | byref |
// | agg | n.a. | - | x | - | - | byref |
// | agg | n.a. | x | - | - | - | null |
// | agg | n.a. | - | - | - | x | byref |
// | agg | n.a. | - | - | - | x[] | byref |
//
// | ptr | n.a. | | | - | | bycopy|
// | ptr | n.a. | - | x | - | - | bycopy|
// | ptr | n.a. | x | - | - | - | null |
// | ptr | n.a. | - | - | - | x | byref |
// | ptr | n.a. | - | - | - | x[] | bycopy|
// | ptr | n.a. | - | - | x | | bycopy|
// | ptr | n.a. | - | - | x | x | bycopy|
// | ptr | n.a. | - | - | x | x[] | bycopy|
// =========================================================================
// Legend:
// scl - scalar
// ptr - pointer
// agg - aggregate
// x - applies
// - - invalid in this combination
// [] - mapped with an array section
// byref - should be mapped by reference
// bycopy - should be mapped by value (passed by copy)
// null - initialize a local variable to null on the device
//
// Observations:
// - All scalar declarations that show up in a map clause have to be passed
// by reference, because they may have been mapped in the enclosing data
// environment.
// - If the scalar value does not fit the size of uintptr, it has to be
// passed by reference, regardless of the result in the table above.
// - For pointers mapped by value that have either an implicit map or an
// array section, the runtime library may pass the NULL value to the
// device instead of the value passed to it by the compiler.
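//
// Illustrative example of the rules above (a sketch):
//   int x;
//   #pragma omp target map(tofrom: x)  // 'x' is captured by reference.
//   #pragma omp target firstprivate(x) // 'x' is captured by copy.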
if (Ty->isReferenceType())
Ty = Ty->castAs<ReferenceType>()->getPointeeType();
// Locate map clauses and see if the variable being captured is referred to
// in any of those clauses. Here we only care about variables, not fields,
// because fields are part of aggregates.
bool IsVariableAssociatedWithSection = false;
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, Level,
[&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
// Only the map clause information influences how a variable is
// captured. E.g. is_device_ptr does not require changing the default
// behavior.
if (WhereFoundClauseKind != OMPC_map)
return false;
auto EI = MapExprComponents.rbegin();
auto EE = MapExprComponents.rend();
assert(EI != EE && "Invalid map expression!");
if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
++EI;
if (EI == EE)
return false;
if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
}
// Keep looking for more map info.
return false;
});
if (IsVariableUsedInMapClause) {
// If the variable is identified in a map clause it is always captured by
// reference, except when it is a pointer that is dereferenced somehow.
IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
} else {
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
IsByRef =
(DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
!Ty->isAnyPointerType()) ||
!Ty->isScalarType() ||
DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
}
}
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
((IsVariableUsedInMapClause &&
DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
OMPD_target) ||
!DSAStack->hasExplicitDSA(
D,
[](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
Level, /*NotLastprivate=*/true)) &&
// If the variable is artificial and must be captured by value, try to
// capture it by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
!cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
}
// When passing data by copy, we need to make sure it fits the uintptr size
// and alignment, because the runtime library only deals with uintptr types.
// If it does not fit the uintptr size, we need to pass the data by reference
// instead.
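// For example (illustrative and target-dependent), on a typical 64-bit target
// a captured 'long double' occupying 16 bytes does not fit in a 'uintptr_t',
// so it is passed by reference even if the rules above chose by-copy.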
if (!IsByRef &&
(Ctx.getTypeSizeInChars(Ty) >
Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
return IsByRef;
}
unsigned Sema::getOpenMPNestingLevel() const {
assert(getLangOpts().OpenMP);
return DSAStack->getNestingLevel();
}
bool Sema::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
DSAStack->hasDirective(
[](OpenMPDirectiveKind K, const DeclarationNameInfo &,
SourceLocation) -> bool {
return isOpenMPTargetExecutionDirective(K);
},
false);
}
VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
unsigned StopAt) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
// If we want to determine whether the variable should be captured from the
// perspective of the current capturing scope, and we've already left all the
// capturing scopes of the top directive on the stack, check from the
// perspective of its parent directive (if any) instead.
DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
*DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
// If we are attempting to capture a global variable in a directive with
// 'target' we return it so that this global is also mapped to the device.
//
auto *VD = dyn_cast<VarDecl>(D);
if (VD && !VD->hasLocalStorage() &&
(getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
if (isInOpenMPDeclareTargetContext()) {
// Try to mark variable as declare target if it is used in capturing
// regions.
if (LangOpts.OpenMP <= 45 &&
!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
return nullptr;
} else if (isInOpenMPTargetExecutionDirective()) {
// If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured.
//
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return nullptr;
return VD;
}
}
if (CheckScopeInfo) {
bool OpenMPFound = false;
for (unsigned I = StopAt + 1; I > 0; --I) {
FunctionScopeInfo *FSI = FunctionScopes[I - 1];
if (!isa<CapturingScopeInfo>(FSI))
return nullptr;
if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
if (RSI->CapRegionKind == CR_OpenMP) {
OpenMPFound = true;
break;
}
}
if (!OpenMPFound)
return nullptr;
}
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
DSAStack->getParentDirective() != OMPD_unknown)) {
auto &&Info = DSAStack->isLoopControlVariable(D);
if (Info.first ||
(VD && VD->hasLocalStorage() &&
isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
DSAStackTy::DSAVarData DVarPrivate =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
// Threadprivate variables must not be captured.
if (isOpenMPThreadPrivate(DVarPrivate.CKind))
return nullptr;
// Capture the variable if it is private in an enclosing region, or if it
// appears in a directive with a default(none) clause and is not used in any
// clause.
DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
[](OpenMPDirectiveKind) { return true; },
DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown ||
(VD && DSAStack->getDefaultDSA() == DSA_none))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
}
void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const {
SmallVector<OpenMPDirectiveKind, 4> Regions;
getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
FunctionScopesIndex -= Regions.size();
}
void Sema::startOpenMPLoop() {
assert(LangOpts.OpenMP && "OpenMP must be enabled.");
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
DSAStack->loopInit();
}
void Sema::startOpenMPCXXRangeFor() {
assert(LangOpts.OpenMP && "OpenMP must be enabled.");
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
DSAStack->resetPossibleLoopCounter();
DSAStack->loopStart();
}
}
bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
if (DSAStack->getAssociatedLoops() > 0 &&
!DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
return true;
}
if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
DSAStack->isLoopControlVariable(D).first) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
!isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
return true;
}
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
DSAStack->isForceVarCapturing() &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
return true;
}
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
DSAStack->getClauseParsingMode() == OMPC_private) ||
// Consider the taskgroup reduction descriptor variable private to avoid
// possible capture in the region.
(DSAStack->hasExplicitDirective(
[](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
Level) &&
DSAStack->isTaskgroupReductionRef(D, Level));
}
void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
OpenMPClauseKind OMPC = OMPC_unknown;
for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
const unsigned NewLevel = I - 1;
if (DSAStack->hasExplicitDSA(D,
[&OMPC](const OpenMPClauseKind K) {
if (isOpenMPPrivate(K)) {
OMPC = K;
return true;
}
return false;
},
NewLevel))
break;
if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, NewLevel,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind) { return true; })) {
OMPC = OMPC_map;
break;
}
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
NewLevel)) {
OMPC = OMPC_map;
if (D->getType()->isScalarType() &&
DSAStack->getDefaultDMAAtLevel(NewLevel) !=
DefaultMapAttributes::DMA_tofrom_scalar)
OMPC = OMPC_firstprivate;
break;
}
}
if (OMPC != OMPC_unknown)
FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
}
bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true if the declaration is a global variable that is captured by a
// target execution directive at the given level.
const auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
Level);
}
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::finalizeOpenMPDelayedAnalysis() {
assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
// Diagnose implicit declare target functions and their callees.
for (const auto &CallerCallees : DeviceCallGraph) {
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(
CallerCallees.getFirst()->getMostRecentDecl());
// Ignore host functions during device analysis.
if (LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
continue;
// Ignore nohost functions during host analysis.
if (!LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
continue;
for (const std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation>
&Callee : CallerCallees.getSecond()) {
const FunctionDecl *FD = Callee.first->getMostRecentDecl();
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD);
if (LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
// Diagnose host function called during device codegen.
StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_host);
Diag(Callee.second, diag::err_omp_wrong_device_function_call)
<< HostDevTy << 0;
Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< HostDevTy;
continue;
}
if (!LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// Diagnose nohost function called during host codegen.
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
Diag(Callee.second, diag::err_omp_wrong_device_function_call)
<< NoHostDevTy << 1;
Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< NoHostDevTy;
continue;
}
}
}
}
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
DSAStack->push(DKind, DirName, CurScope, Loc);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
}
void Sema::StartOpenMPClause(OpenMPClauseKind K) {
DSAStack->setClauseParsingMode(K);
}
void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
ArrayRef<OMPClause *> Clauses);
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
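// Illustrative example (a sketch):
//   class S { public: S(int); };  // no default constructor
//   S s(0);
//   #pragma omp parallel for lastprivate(s)                  // error
//   #pragma omp parallel for firstprivate(s) lastprivate(s)  // OK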
if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
for (OMPClause *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
for (Expr *DE : Clause->varlists()) {
if (DE->isValueDependent() || DE->isTypeDependent()) {
PrivateCopies.push_back(nullptr);
continue;
}
auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
auto *VD = cast<VarDecl>(DRE->getDecl());
QualType Type = VD->getType().getNonReferenceType();
const DSAStackTy::DSAVarData DVar =
DSAStack->getTopDSA(VD, /*FromParent=*/false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate a helper private variable and initialize it with the
// default value. The address of the original variable is replaced
// by the address of the new private variable in CodeGen. This new
// variable is not added to IdResolver, so the code in the OpenMP
// region uses the original variable for proper diagnostics.
VarDecl *VDPrivate = buildVarDecl(
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl()) {
PrivateCopies.push_back(nullptr);
continue;
}
PrivateCopies.push_back(buildDeclRefExpr(
*this, VDPrivate, DE->getType(), DE->getExprLoc()));
} else {
// The variable is also firstprivate, so the initialization sequence
// for the private copy has already been generated.
PrivateCopies.push_back(nullptr);
}
}
Clause->setPrivateCopies(PrivateCopies);
}
}
// Check allocate clauses.
if (!CurContext->isDependentContext())
checkAllocateClauses(*this, DSAStack, D->clauses());
}
DSAStack->pop();
DiscardCleanupsInEvaluationContext();
PopExpressionEvaluationContext();
}
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *NumIterations, Sema &SemaRef,
Scope *S, DSAStackTy *Stack);
namespace {
class VarDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return std::make_unique<VarDeclFilterCCC>(*this);
}
};
class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
isa<FunctionDecl>(ND))) {
return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return std::make_unique<VarOrFuncDeclFilterCCC>(*this);
}
};
} // namespace
ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
if (Lookup.isAmbiguous())
return ExprError();
VarDecl *VD;
if (!Lookup.isSingleResult()) {
VarDeclFilterCCC CCC(*this);
if (TypoCorrection Corrected =
CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(Lookup.empty()
? diag::err_undeclared_var_use_suggest
: diag::err_omp_expected_var_arg_suggest)
<< Id.getName());
VD = Corrected.getCorrectionDeclAs<VarDecl>();
} else {
Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
: diag::err_omp_expected_var_arg)
<< Id.getName();
return ExprError();
}
} else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
return ExprError();
}
Lookup.suppressDiagnostics();
// OpenMP [2.9.2, Syntax, C/C++]
// Variables must be file-scope, namespace-scope, or static block-scope.
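// For example (illustrative), '#pragma omp threadprivate(a)' applied to a
// block-scope variable 'a' with automatic storage duration is rejected here.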
if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
Diag(Id.getLoc(), diag::err_omp_global_var_arg)
<< getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
VarDecl *CanonicalVD = VD->getCanonicalDecl();
NamedDecl *ND = CanonicalVD;
// OpenMP [2.9.2, Restrictions, C/C++, p.2]
// A threadprivate directive for file-scope variables must appear outside
// any definition or declaration.
if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
!getCurLexicalContext()->isTranslationUnit()) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.3]
// A threadprivate directive for static class member variables must appear
// in the class definition, in the same scope in which the member
// variables are declared.
if (CanonicalVD->isStaticDataMember() &&
!CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.4]
// A threadprivate directive for namespace-scope variables must appear
// outside any definition or declaration other than the namespace
// definition itself.
if (CanonicalVD->getDeclContext()->isNamespace() &&
(!getCurLexicalContext()->isFileContext() ||
!getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.6]
// A threadprivate directive for static block-scope variables must appear
// in the scope of the variable and not in a nested scope.
if (CanonicalVD->isLocalVarDecl() && CurScope &&
!isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
// A threadprivate directive must lexically precede all references to any
// of the variables in its list.
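// For example (illustrative):
//   int gv; void use() { gv = 1; }
//   #pragma omp threadprivate(gv)  // error: 'gv' already referenced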
if (Kind == OMPD_threadprivate && VD->isUsed() &&
!DSAStack->isThreadPrivate(VD)) {
Diag(Id.getLoc(), diag::err_omp_var_used)
<< getOpenMPDirectiveName(Kind) << VD;
return ExprError();
}
QualType ExprType = VD->getType().getNonReferenceType();
return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
SourceLocation(), VD,
/*RefersToEnclosingVariableOrCapture=*/false,
Id.getLoc(), ExprType, VK_LValue);
}
Sema::DeclGroupPtrTy
Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList) {
if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
CurContext->addDecl(D);
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
return nullptr;
}
namespace {
class LocalVarRefChecker final
: public ConstStmtVisitor<LocalVarRefChecker, bool> {
Sema &SemaRef;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getBeginLoc(),
diag::err_omp_local_var_in_threadprivate_init)
<< E->getSourceRange();
SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
<< VD << VD->getSourceRange();
return true;
}
}
return false;
}
bool VisitStmt(const Stmt *S) {
for (const Stmt *Child : S->children()) {
if (Child && Visit(Child))
return true;
}
return false;
}
explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
};
} // namespace
OMPThreadPrivateDecl *
Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
auto *DE = cast<DeclRefExpr>(RefExpr);
auto *VD = cast<VarDecl>(DE->getDecl());
SourceLocation ILoc = DE->getExprLoc();
// Mark variable as used.
VD->setReferenced();
VD->markUsed(Context);
QualType QType = VD->getType();
if (QType->isDependentType() || QType->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have an incomplete type.
if (RequireCompleteType(ILoc, VD->getType(),
diag::err_omp_threadprivate_incomplete_type)) {
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have a reference type.
if (VD->getType()->isReferenceType()) {
Diag(ILoc, diag::err_omp_ref_type_arg)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if this is a TLS variable. If TLS is not supported, produce the
// corresponding diagnostic.
if ((VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
getLangOpts().OpenMPUseTLS &&
getASTContext().getTargetInfo().isTLSSupported())) ||
(VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
!VD->isLocalVarDecl())) {
Diag(ILoc, diag::err_omp_var_thread_local)
<< VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if the initializer of the threadprivate variable references a
// variable with local storage (this is not supported by the runtime).
if (const Expr *Init = VD->getAnyInitializer()) {
LocalVarRefChecker Checker(*this);
if (Checker.Visit(Init))
continue;
}
Vars.push_back(RefExpr);
DSAStack->addDSA(VD, DE, OMPC_threadprivate);
VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
Context, SourceRange(Loc, Loc)));
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPThreadPrivate(VD);
}
OMPThreadPrivateDecl *D = nullptr;
if (!Vars.empty()) {
D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
Vars);
D->setAccess(AS_public);
}
return D;
}
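/// Determine which predefined OpenMP allocator (if any) \p Allocator refers
/// to by comparing its canonical form against the predefined allocator
/// expressions recorded on the DSA stack. A null allocator maps to
/// omp_default_mem_alloc; a dependent expression or one that matches no
/// predefined allocator is treated as a user-defined allocator.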
static OMPAllocateDeclAttr::AllocatorTypeTy
getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
if (!Allocator)
return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
Allocator->isInstantiationDependent() ||
Allocator->containsUnexpandedParameterPack())
return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
const Expr *AE = Allocator->IgnoreParenImpCasts();
for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
llvm::FoldingSetNodeID AEId, DAEId;
AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
if (AEId == DAEId) {
AllocatorKindRes = AllocatorKind;
break;
}
}
return AllocatorKindRes;
}
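/// If \p VD already carries an OMPAllocateDeclAttr, verify that the previously
/// specified allocator matches \p Allocator. Returns true (after emitting a
/// warning and a note) if the allocators differ, false otherwise.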
static bool checkPreviousOMPAllocateAttribute(
Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
if (!VD->hasAttr<OMPAllocateDeclAttr>())
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
Expr *PrevAllocator = A->getAllocator();
OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
getAllocatorKind(S, Stack, PrevAllocator);
bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
if (AllocatorsMatch &&
AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
Allocator && PrevAllocator) {
const Expr *AE = Allocator->IgnoreParenImpCasts();
const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
llvm::FoldingSetNodeID AEId, PAEId;
AE->Profile(AEId, S.Context, /*Canonical=*/true);
PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
AllocatorsMatch = AEId == PAEId;
}
if (!AllocatorsMatch) {
SmallString<256> AllocatorBuffer;
llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
if (Allocator)
Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
SmallString<256> PrevAllocatorBuffer;
llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
if (PrevAllocator)
PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
S.getPrintingPolicy());
SourceLocation AllocatorLoc =
Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
SourceRange AllocatorRange =
Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
SourceLocation PrevAllocatorLoc =
PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
SourceRange PrevAllocatorRange =
PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
<< (Allocator ? 1 : 0) << AllocatorStream.str()
<< (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
<< AllocatorRange;
S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
<< PrevAllocatorRange;
return true;
}
return false;
}
static void
applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
Expr *Allocator, SourceRange SR) {
if (VD->hasAttr<OMPAllocateDeclAttr>())
return;
if (Allocator &&
(Allocator->isTypeDependent() || Allocator->isValueDependent() ||
Allocator->isInstantiationDependent() ||
Allocator->containsUnexpandedParameterPack()))
return;
auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
Allocator, SR);
VD->addAttr(A);
if (ASTMutationListener *ML = S.Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPAllocate(VD, A);
}
Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
SourceLocation Loc, ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {