//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
using namespace clang;
using namespace sema;
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const {
return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
Context.getTargetInfo());
}
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
unsigned argCount = call->getNumArgs();
if (argCount == desiredArgCount) return false;
if (argCount < desiredArgCount)
return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /*function call*/ << desiredArgCount << argCount
<< call->getSourceRange();
// Highlight all the excess arguments.
SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
call->getArg(argCount - 1)->getEndLoc());
return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/ << desiredArgCount << argCount
<< call->getArg(1)->getSourceRange();
}
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
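/// For example (illustrative): __builtin_annotation(val, "my.note") is valid
/// when 'val' is an integer, and the result has the type of 'val'.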
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 2))
return true;
// First argument should be an integer.
Expr *ValArg = TheCall->getArg(0);
QualType Ty = ValArg->getType();
if (!Ty->isIntegerType()) {
S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
<< ValArg->getSourceRange();
return true;
}
// Second argument should be a constant string.
Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
if (!Literal || !Literal->isAscii()) {
S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
<< StrArg->getSourceRange();
return true;
}
TheCall->setType(Ty);
return false;
}
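/// Check that every argument to the MSVC annotation builtin (illustratively,
/// __annotation(L"category", L"text")) is a wide string literal; at least one
/// argument is required.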
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
// We need at least one argument.
if (TheCall->getNumArgs() < 1) {
S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
<< 0 << 1 << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return true;
}
// All arguments should be wide string literals.
for (Expr *Arg : TheCall->arguments()) {
auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
if (!Literal || !Literal->isWide()) {
S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
<< Arg->getSourceRange();
return true;
}
}
return false;
}
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
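/// For example (illustrative): __builtin_addressof(x) yields a pointer to 'x'
/// and, unlike '&x', never invokes an overloaded operator&.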
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return true;
ExprResult Arg(TheCall->getArg(0));
QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
if (ResultType.isNull())
return true;
TheCall->setArg(0, Arg.get());
TheCall->setType(ResultType);
return false;
}
/// Check the number of arguments and set the result type to
/// the argument type.
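/// (Assumed to correspond to __builtin_preserve_access_index-style builtins,
/// whose result has the same type as their argument.)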
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return true;
TheCall->setType(TheCall->getArg(0)->getType());
return false;
}
/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
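/// For example (illustrative): __builtin_is_aligned(p, 16) yields a bool,
/// while __builtin_align_up(p, 16) yields a value of the (possibly decayed)
/// type of 'p'.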
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
if (checkArgCount(S, TheCall, 2))
return true;
clang::Expr *Source = TheCall->getArg(0);
bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
auto IsValidIntegerType = [](QualType Ty) {
return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
};
QualType SrcTy = Source->getType();
// We should also be able to use it with arrays (but not functions!).
if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
SrcTy = S.Context.getDecayedType(SrcTy);
}
if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
SrcTy->isFunctionPointerType()) {
// FIXME: this is not quite the right error message since we don't allow
// floating point types, or member pointers.
S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
<< SrcTy;
return true;
}
clang::Expr *AlignOp = TheCall->getArg(1);
if (!IsValidIntegerType(AlignOp->getType())) {
S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
<< AlignOp->getType();
return true;
}
Expr::EvalResult AlignResult;
unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
// We can't check validity of alignment if it is value dependent.
if (!AlignOp->isValueDependent() &&
AlignOp->EvaluateAsInt(AlignResult, S.Context,
Expr::SE_AllowSideEffects)) {
llvm::APSInt AlignValue = AlignResult.Val.getInt();
llvm::APSInt MaxValue(
llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
if (AlignValue < 1) {
S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
return true;
}
if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
<< toString(MaxValue, 10);
return true;
}
if (!AlignValue.isPowerOf2()) {
S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
return true;
}
if (AlignValue == 1) {
S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
<< IsBooleanAlignBuiltin;
}
}
ExprResult SrcArg = S.PerformCopyInitialization(
InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
SourceLocation(), Source);
if (SrcArg.isInvalid())
return true;
TheCall->setArg(0, SrcArg.get());
ExprResult AlignArg =
S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
S.Context, AlignOp->getType(), false),
SourceLocation(), AlignOp);
if (AlignArg.isInvalid())
return true;
TheCall->setArg(1, AlignArg.get());
// For align_up/align_down, the return type is the same as the (potentially
// decayed) argument type including qualifiers. For is_aligned(), the result
// is always bool.
TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
return false;
}
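/// Check the arguments to the overflow builtins: the first two must be
/// integers and the third a pointer to a non-const integer, e.g.
/// (illustrative) __builtin_add_overflow(a, b, &result).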
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
unsigned BuiltinID) {
if (checkArgCount(S, TheCall, 3))
return true;
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
if (Arg.isInvalid()) return true;
TheCall->setArg(I, Arg.get());
QualType Ty = Arg.get()->getType();
if (!Ty->isIntegerType()) {
S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
<< Ty << Arg.get()->getSourceRange();
return true;
}
}
// Third argument should be a pointer to a non-const integer.
// IRGen correctly handles volatile, restrict, and address spaces, and
// the other qualifiers aren't possible.
{
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
if (Arg.isInvalid()) return true;
TheCall->setArg(2, Arg.get());
QualType Ty = Arg.get()->getType();
const auto *PtrTy = Ty->getAs<PointerType>();
if (!PtrTy ||
!PtrTy->getPointeeType()->isIntegerType() ||
PtrTy->getPointeeType().isConstQualified()) {
S.Diag(Arg.get()->getBeginLoc(),
diag::err_overflow_builtin_must_be_ptr_int)
<< Ty << Arg.get()->getSourceRange();
return true;
}
}
// Disallow signed ExtIntType args larger than 128 bits to the mul overflow
// builtin until we improve backend support.
if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
for (unsigned I = 0; I < 3; ++I) {
const auto Arg = TheCall->getArg(I);
// Third argument will be a pointer.
auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
S.getASTContext().getIntWidth(Ty) > 128)
return S.Diag(Arg->getBeginLoc(),
diag::err_overflow_builtin_ext_int_max_size)
<< 128;
}
}
return false;
}
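/// Check __builtin_call_with_static_chain: the first argument must be a
/// plain call (not a block, builtin, or pseudo-destructor call) and the
/// second a pointer to use as the static chain, e.g. (illustrative)
/// __builtin_call_with_static_chain(f(x), chain_ptr).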
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
if (checkArgCount(S, BuiltinCall, 2))
return true;
SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
Expr *Call = BuiltinCall->getArg(0);
Expr *Chain = BuiltinCall->getArg(1);
if (Call->getStmtClass() != Stmt::CallExprClass) {
S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
<< Call->getSourceRange();
return true;
}
auto CE = cast<CallExpr>(Call);
if (CE->getCallee()->getType()->isBlockPointerType()) {
S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
<< Call->getSourceRange();
return true;
}
const Decl *TargetDecl = CE->getCalleeDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
if (FD->getBuiltinID()) {
S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
<< Call->getSourceRange();
return true;
}
if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
<< Call->getSourceRange();
return true;
}
ExprResult ChainResult = S.UsualUnaryConversions(Chain);
if (ChainResult.isInvalid())
return true;
if (!ChainResult.get()->getType()->isPointerType()) {
S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
<< Chain->getSourceRange();
return true;
}
QualType ReturnTy = CE->getCallReturnType(S.Context);
QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
QualType BuiltinTy = S.Context.getFunctionType(
ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
Builtin =
S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
BuiltinCall->setType(CE->getType());
BuiltinCall->setValueKind(CE->getValueKind());
BuiltinCall->setObjectKind(CE->getObjectKind());
BuiltinCall->setCallee(Builtin);
BuiltinCall->setArg(1, ChainResult.get());
return false;
}
namespace {
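// Diagnoses scanf-style calls in which a constant field width (e.g. "%10s")
// requires more space than the corresponding destination buffer provides.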
class ScanfDiagnosticFormatHandler
: public analyze_format_string::FormatStringHandler {
// Accepts the argument index (relative to the first destination index) of the
// argument whose size we want.
using ComputeSizeFunction =
llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;
// Accepts the argument index (relative to the first destination index), the
// destination size, and the source size.
using DiagnoseFunction =
llvm::function_ref<void(unsigned, unsigned, unsigned)>;
ComputeSizeFunction ComputeSizeArgument;
DiagnoseFunction Diagnose;
public:
ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
DiagnoseFunction Diagnose)
: ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
const char *StartSpecifier,
unsigned specifierLen) override {
if (!FS.consumesDataArgument())
return true;
unsigned NulByte = 0;
switch ((FS.getConversionSpecifier().getKind())) {
default:
return true;
case analyze_format_string::ConversionSpecifier::sArg:
case analyze_format_string::ConversionSpecifier::ScanListArg:
NulByte = 1;
break;
case analyze_format_string::ConversionSpecifier::cArg:
break;
}
auto OptionalFW = FS.getFieldWidth();
if (OptionalFW.getHowSpecified() !=
analyze_format_string::OptionalAmount::HowSpecified::Constant)
return true;
unsigned SourceSize = OptionalFW.getConstantAmount() + NulByte;
auto DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
if (!DestSizeAPS)
return true;
unsigned DestSize = DestSizeAPS->getZExtValue();
if (DestSize < SourceSize)
Diagnose(FS.getArgIndex(), DestSize, SourceSize);
return true;
}
};
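// Computes a conservative lower bound on the number of bytes a printf-style
// format string will produce; e.g. (illustrative) the specifier "%5d"
// contributes at least five bytes.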
class EstimateSizeFormatHandler
: public analyze_format_string::FormatStringHandler {
size_t Size;
public:
EstimateSizeFormatHandler(StringRef Format)
: Size(std::min(Format.find(0), Format.size()) +
1 /* null byte always written by sprintf */) {}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *, unsigned SpecifierLen) override {
const size_t FieldWidth = computeFieldWidth(FS);
const size_t Precision = computePrecision(FS);
// The actual format.
switch (FS.getConversionSpecifier().getKind()) {
// Just a char.
case analyze_format_string::ConversionSpecifier::cArg:
case analyze_format_string::ConversionSpecifier::CArg:
Size += std::max(FieldWidth, (size_t)1);
break;
// Just an integer.
case analyze_format_string::ConversionSpecifier::dArg:
case analyze_format_string::ConversionSpecifier::DArg:
case analyze_format_string::ConversionSpecifier::iArg:
case analyze_format_string::ConversionSpecifier::oArg:
case analyze_format_string::ConversionSpecifier::OArg:
case analyze_format_string::ConversionSpecifier::uArg:
case analyze_format_string::ConversionSpecifier::UArg:
case analyze_format_string::ConversionSpecifier::xArg:
case analyze_format_string::ConversionSpecifier::XArg:
Size += std::max(FieldWidth, Precision);
break;
// %g style conversion switches between %f or %e style dynamically.
// %f always takes less space, so default to it.
case analyze_format_string::ConversionSpecifier::gArg:
case analyze_format_string::ConversionSpecifier::GArg:
// Floating point number in the form '[+]ddd.ddd'.
case analyze_format_string::ConversionSpecifier::fArg:
case analyze_format_string::ConversionSpecifier::FArg:
Size += std::max(FieldWidth, 1 /* integer part */ +
(Precision ? 1 + Precision
: 0) /* period + decimal */);
break;
// Floating point number in the form '[-]d.ddde[+-]dd'.
case analyze_format_string::ConversionSpecifier::eArg:
case analyze_format_string::ConversionSpecifier::EArg:
Size +=
std::max(FieldWidth,
1 /* integer part */ +
(Precision ? 1 + Precision : 0) /* period + decimal */ +
1 /* e or E letter */ + 2 /* exponent */);
break;
// Floating point number in the form '[-]0xh.hhhhp±dd'.
case analyze_format_string::ConversionSpecifier::aArg:
case analyze_format_string::ConversionSpecifier::AArg:
Size +=
std::max(FieldWidth,
2 /* 0x */ + 1 /* integer part */ +
(Precision ? 1 + Precision : 0) /* period + decimal */ +
1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
break;
// Just a string.
case analyze_format_string::ConversionSpecifier::sArg:
case analyze_format_string::ConversionSpecifier::SArg:
Size += FieldWidth;
break;
// Just a pointer in the form '0xddd'.
case analyze_format_string::ConversionSpecifier::pArg:
Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
break;
// A plain percent.
case analyze_format_string::ConversionSpecifier::PercentArg:
Size += 1;
break;
default:
break;
}
Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
if (FS.hasAlternativeForm()) {
switch (FS.getConversionSpecifier().getKind()) {
default:
break;
// Force a leading '0'.
case analyze_format_string::ConversionSpecifier::oArg:
Size += 1;
break;
// Force a leading '0x'.
case analyze_format_string::ConversionSpecifier::xArg:
case analyze_format_string::ConversionSpecifier::XArg:
Size += 2;
break;
// Force a period '.' before decimal, even if precision is 0.
case analyze_format_string::ConversionSpecifier::aArg:
case analyze_format_string::ConversionSpecifier::AArg:
case analyze_format_string::ConversionSpecifier::eArg:
case analyze_format_string::ConversionSpecifier::EArg:
case analyze_format_string::ConversionSpecifier::fArg:
case analyze_format_string::ConversionSpecifier::FArg:
case analyze_format_string::ConversionSpecifier::gArg:
case analyze_format_string::ConversionSpecifier::GArg:
Size += (Precision ? 0 : 1);
break;
}
}
assert(SpecifierLen <= Size && "no underflow");
Size -= SpecifierLen;
return true;
}
size_t getSizeLowerBound() const { return Size; }
private:
static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
size_t FieldWidth = 0;
if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
FieldWidth = FW.getConstantAmount();
return FieldWidth;
}
static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
size_t Precision = 0;
// See man 3 printf for default precision value based on the specifier.
switch (FW.getHowSpecified()) {
case analyze_format_string::OptionalAmount::NotSpecified:
switch (FS.getConversionSpecifier().getKind()) {
default:
break;
case analyze_format_string::ConversionSpecifier::dArg: // %d
case analyze_format_string::ConversionSpecifier::DArg: // %D
case analyze_format_string::ConversionSpecifier::iArg: // %i
Precision = 1;
break;
case analyze_format_string::ConversionSpecifier::oArg: // %o
case analyze_format_string::ConversionSpecifier::OArg: // %O
case analyze_format_string::ConversionSpecifier::uArg: // %u
case analyze_format_string::ConversionSpecifier::UArg: // %U
case analyze_format_string::ConversionSpecifier::xArg: // %x
case analyze_format_string::ConversionSpecifier::XArg: // %X
Precision = 1;
break;
case analyze_format_string::ConversionSpecifier::fArg: // %f
case analyze_format_string::ConversionSpecifier::FArg: // %F
case analyze_format_string::ConversionSpecifier::eArg: // %e
case analyze_format_string::ConversionSpecifier::EArg: // %E
case analyze_format_string::ConversionSpecifier::gArg: // %g
case analyze_format_string::ConversionSpecifier::GArg: // %G
Precision = 6;
break;
case analyze_format_string::ConversionSpecifier::pArg: // %p
Precision = 1;
break;
}
break;
case analyze_format_string::OptionalAmount::Constant:
Precision = FW.getConstantAmount();
break;
default:
break;
}
return Precision;
}
};
} // namespace
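/// Diagnose calls to fortified memory/string builtins whose source size
/// provably exceeds the destination object size, e.g. (illustrative):
///   char buf[4];
///   strcpy(buf, "hello"); // needs 6 bytes including the null terminator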
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
CallExpr *TheCall) {
if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
isConstantEvaluated())
return;
unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
if (!BuiltinID)
return;
const TargetInfo &TI = getASTContext().getTargetInfo();
unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
auto ComputeExplicitObjectSizeArgument =
[&](unsigned Index) -> Optional<llvm::APSInt> {
Expr::EvalResult Result;
Expr *SizeArg = TheCall->getArg(Index);
if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
return llvm::None;
return Result.Val.getInt();
};
auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
// If the parameter has a pass_object_size attribute, then we should use its
// (potentially) more strict checking mode. Otherwise, conservatively assume
// type 0.
int BOSType = 0;
// This check can fail for variadic functions.
if (Index < FD->getNumParams()) {
if (const auto *POS =
FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
BOSType = POS->getType();
}
const Expr *ObjArg = TheCall->getArg(Index);
uint64_t Result;
if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
return llvm::None;
// Get the object size in the target's size_t width.
return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
};
auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
Expr *ObjArg = TheCall->getArg(Index);
uint64_t Result;
if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
return llvm::None;
// Add 1 for null byte.
return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
};
Optional<llvm::APSInt> SourceSize;
Optional<llvm::APSInt> DestinationSize;
unsigned DiagID = 0;
bool IsChkVariant = false;
auto GetFunctionName = [&]() {
StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
// Skim off the details of whichever builtin was called to produce a better
// diagnostic, as it's unlikely that the user wrote the __builtin
// explicitly.
if (IsChkVariant) {
FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
FunctionName = FunctionName.drop_back(std::strlen("_chk"));
} else if (FunctionName.startswith("__builtin_")) {
FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
}
return FunctionName;
};
switch (BuiltinID) {
default:
return;
case Builtin::BI__builtin_strcpy:
case Builtin::BIstrcpy: {
DiagID = diag::warn_fortify_strlen_overflow;
SourceSize = ComputeStrLenArgument(1);
DestinationSize = ComputeSizeArgument(0);
break;
}
case Builtin::BI__builtin___strcpy_chk: {
DiagID = diag::warn_fortify_strlen_overflow;
SourceSize = ComputeStrLenArgument(1);
DestinationSize = ComputeExplicitObjectSizeArgument(2);
IsChkVariant = true;
break;
}
case Builtin::BIscanf:
case Builtin::BIfscanf:
case Builtin::BIsscanf: {
unsigned FormatIndex = 1;
unsigned DataIndex = 2;
if (BuiltinID == Builtin::BIscanf) {
FormatIndex = 0;
DataIndex = 1;
}
const auto *FormatExpr =
TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
if (!Format)
return;
if (!Format->isAscii() && !Format->isUTF8())
return;
auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
unsigned SourceSize) {
DiagID = diag::warn_fortify_scanf_overflow;
unsigned Index = ArgIndex + DataIndex;
StringRef FunctionName = GetFunctionName();
DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
PDiag(DiagID) << FunctionName << (Index + 1)
<< DestSize << SourceSize);
};
StringRef FormatStrRef = Format->getString();
auto ShiftedComputeSizeArgument = [&](unsigned Index) {
return ComputeSizeArgument(Index + DataIndex);
};
ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
const char *FormatBytes = FormatStrRef.data();
const ConstantArrayType *T =
Context.getAsConstantArrayType(Format->getType());
assert(T && "String literal not of constant array type!");
size_t TypeSize = T->getSize().getZExtValue();
// In case there's a null byte somewhere.
size_t StrLen =
std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
analyze_format_string::ParseScanfString(H, FormatBytes,
FormatBytes + StrLen, getLangOpts(),
Context.getTargetInfo());
// Unlike the other cases, the diagnostic has already been emitted above (it
// refers to the specific argument index), so there is no need to fall
// through to the common diagnostic at the end of this function.
return;
}
case Builtin::BIsprintf:
case Builtin::BI__builtin___sprintf_chk: {
size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
if (!Format->isAscii() && !Format->isUTF8())
return;
StringRef FormatStrRef = Format->getString();
EstimateSizeFormatHandler H(FormatStrRef);
const char *FormatBytes = FormatStrRef.data();
const ConstantArrayType *T =
Context.getAsConstantArrayType(Format->getType());
assert(T && "String literal not of constant array type!");
size_t TypeSize = T->getSize().getZExtValue();
// In case there's a null byte somewhere.
size_t StrLen =
std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
if (!analyze_format_string::ParsePrintfString(
H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
Context.getTargetInfo(), false)) {
DiagID = diag::warn_fortify_source_format_overflow;
SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
.extOrTrunc(SizeTypeWidth);
if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
DestinationSize = ComputeExplicitObjectSizeArgument(2);
IsChkVariant = true;
} else {
DestinationSize = ComputeSizeArgument(0);
}
break;
}
}
return;
}
case Builtin::BI__builtin___memcpy_chk:
case Builtin::BI__builtin___memmove_chk:
case Builtin::BI__builtin___memset_chk:
case Builtin::BI__builtin___strlcat_chk:
case Builtin::BI__builtin___strlcpy_chk:
case Builtin::BI__builtin___strncat_chk:
case Builtin::BI__builtin___strncpy_chk:
case Builtin::BI__builtin___stpncpy_chk:
case Builtin::BI__builtin___memccpy_chk:
case Builtin::BI__builtin___mempcpy_chk: {
DiagID = diag::warn_builtin_chk_overflow;
SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
DestinationSize =
ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
IsChkVariant = true;
break;
}
case Builtin::BI__builtin___snprintf_chk:
case Builtin::BI__builtin___vsnprintf_chk: {
DiagID = diag::warn_builtin_chk_overflow;
SourceSize = ComputeExplicitObjectSizeArgument(1);
DestinationSize = ComputeExplicitObjectSizeArgument(3);
IsChkVariant = true;
break;
}
case Builtin::BIstrncat:
case Builtin::BI__builtin_strncat:
case Builtin::BIstrncpy:
case Builtin::BI__builtin_strncpy:
case Builtin::BIstpncpy:
case Builtin::BI__builtin_stpncpy: {
// Whether these functions overflow depends on the runtime strlen of the
// string, not just the buffer size, so emitting the "always overflow"
// diagnostic isn't quite right. We should still diagnose passing a buffer
// size larger than the destination buffer though; this is a runtime abort
// in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
DiagID = diag::warn_fortify_source_size_mismatch;
SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
DestinationSize = ComputeSizeArgument(0);
break;
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy:
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove:
case Builtin::BImemset:
case Builtin::BI__builtin_memset:
case Builtin::BImempcpy:
case Builtin::BI__builtin_mempcpy: {
DiagID = diag::warn_fortify_source_overflow;
SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
DestinationSize = ComputeSizeArgument(0);
break;
}
case Builtin::BIsnprintf:
case Builtin::BI__builtin_snprintf:
case Builtin::BIvsnprintf:
case Builtin::BI__builtin_vsnprintf: {
DiagID = diag::warn_fortify_source_size_mismatch;
SourceSize = ComputeExplicitObjectSizeArgument(1);
DestinationSize = ComputeSizeArgument(0);
break;
}
}
if (!SourceSize || !DestinationSize ||
SourceSize.getValue().ule(DestinationSize.getValue()))
return;
StringRef FunctionName = GetFunctionName();
SmallString<16> DestinationStr;
SmallString<16> SourceStr;
DestinationSize->toString(DestinationStr, /*Radix=*/10);
SourceSize->toString(SourceStr, /*Radix=*/10);
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(DiagID)
<< FunctionName << DestinationStr << SourceStr);
}
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
Scope::ScopeFlags NeededScopeFlags,
unsigned DiagID) {
// Scopes aren't available during instantiation. Fortunately, builtin
// functions cannot be template args so they cannot be formed through template
// instantiation. Therefore checking once during the parse is sufficient.
if (SemaRef.inTemplateInstantiation())
return false;
Scope *S = SemaRef.getCurScope();
while (S && !S->isSEHExceptScope())
S = S->getParent();
if (!S || !(S->getFlags() & NeededScopeFlags)) {
auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
SemaRef.Diag(TheCall->getExprLoc(), DiagID)
<< DRE->getDecl()->getIdentifier();
return true;
}
return false;
}
static inline bool isBlockPointer(Expr *Arg) {
return Arg->getType()->isBlockPointerType();
}
/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
const BlockPointerType *BPT =
cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
ArrayRef<QualType> Params =
BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
unsigned ArgCounter = 0;
bool IllegalParams = false;
// Iterate through the block parameters and check that each one is a
// 'local void *'; diagnose any parameter that is not.
for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
I != E; ++I, ++ArgCounter) {
if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
(*I)->getPointeeType().getQualifiers().getAddressSpace() !=
LangAS::opencl_local) {
// Get the location of the error. If a block literal has been passed
// (BlockExpr) then we can point straight to the offending argument,
// else we just point to the variable reference.
SourceLocation ErrorLoc;
if (isa<BlockExpr>(BlockArg)) {
BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
} else if (isa<DeclRefExpr>(BlockArg)) {
ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
}
S.Diag(ErrorLoc,
diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
IllegalParams = true;
}
}
return IllegalParams;
}
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
<< 1 << Call->getDirectCallee() << "cl_khr_subgroups";
return true;
}
return false;
}
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 2))
return true;
if (checkOpenCLSubgroupExt(S, TheCall))
return true;
// First argument is an ndrange_t type.
Expr *NDRangeArg = TheCall->getArg(0);
if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'ndrange_t'";
return true;
}
Expr *BlockArg = TheCall->getArg(1);
if (!isBlockPointer(BlockArg)) {
S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
return checkOpenCLBlockArgs(S, BlockArg);
}
/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return true;
Expr *BlockArg = TheCall->getArg(0);
if (!isBlockPointer(BlockArg)) {
S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
return checkOpenCLBlockArgs(S, BlockArg);
}
/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
const QualType &IntType);
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
unsigned Start, unsigned End) {
bool IllegalParams = false;
for (unsigned I = Start; I <= End; ++I)
IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
S.Context.getSizeType());
return IllegalParams;
}
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
Expr *BlockArg,
unsigned NumNonVarArgs) {
const BlockPointerType *BPT =
cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
unsigned NumBlockParams =
BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
unsigned TotalNumArgs = TheCall->getNumArgs();
// For each argument passed to the block, a corresponding uint needs to
// be passed to describe the size of the local memory.
if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
S.Diag(TheCall->getBeginLoc(),
diag::err_opencl_enqueue_kernel_local_size_args);
return true;
}
// Check that the sizes of the local memory are specified by integers.
return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
TotalNumArgs - 1);
}
/// OpenCL C v2.0, s6.13.17 - The enqueue_kernel built-in function has four
/// different overload forms, specified in Table 6.13.17.1:
/// int enqueue_kernel(queue_t queue,
/// kernel_enqueue_flags_t flags,
/// const ndrange_t ndrange,
/// void (^block)(void))
/// int enqueue_kernel(queue_t queue,
/// kernel_enqueue_flags_t flags,
/// const ndrange_t ndrange,
/// uint num_events_in_wait_list,
/// clk_event_t *event_wait_list,
/// clk_event_t *event_ret,
/// void (^block)(void))
/// int enqueue_kernel(queue_t queue,
/// kernel_enqueue_flags_t flags,
/// const ndrange_t ndrange,
/// void (^block)(local void*, ...),
/// uint size0, ...)
/// int enqueue_kernel(queue_t queue,
/// kernel_enqueue_flags_t flags,
/// const ndrange_t ndrange,
/// uint num_events_in_wait_list,
/// clk_event_t *event_wait_list,
/// clk_event_t *event_ret,
/// void (^block)(local void*, ...),
/// uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
if (NumArgs < 4) {
S.Diag(TheCall->getBeginLoc(),
diag::err_typecheck_call_too_few_args_at_least)
<< 0 << 4 << NumArgs;
return true;
}
Expr *Arg0 = TheCall->getArg(0);
Expr *Arg1 = TheCall->getArg(1);
Expr *Arg2 = TheCall->getArg(2);
Expr *Arg3 = TheCall->getArg(3);
// First argument always needs to be a queue_t type.
if (!Arg0->getType()->isQueueT()) {
S.Diag(TheCall->getArg(0)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << S.Context.OCLQueueTy;
return true;
}
// Second argument always needs to be a kernel_enqueue_flags_t enum value.
if (!Arg1->getType()->isIntegerType()) {
S.Diag(TheCall->getArg(1)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
return true;
}
// Third argument is always an ndrange_t type.
if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
S.Diag(TheCall->getArg(2)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'ndrange_t'";
return true;
}
// With four arguments, there is only one form that the function could be
// called in: no events and no variable arguments.
if (NumArgs == 4) {
// check that the last argument is the right block type.
if (!isBlockPointer(Arg3)) {
S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
// we have a block type, check the prototype
const BlockPointerType *BPT =
cast<BlockPointerType>(Arg3->getType().getCanonicalType());
if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
S.Diag(Arg3->getBeginLoc(),
diag::err_opencl_enqueue_kernel_blocks_no_args);
return true;
}
return false;
}
// we can have block + varargs.
if (isBlockPointer(Arg3))
return (checkOpenCLBlockArgs(S, Arg3) ||
checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
// last two cases with either exactly 7 args or 7 args and varargs.
if (NumArgs >= 7) {
// check common block argument.
Expr *Arg6 = TheCall->getArg(6);
if (!isBlockPointer(Arg6)) {
S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
if (checkOpenCLBlockArgs(S, Arg6))
return true;
// Fourth argument has to be an integer type.
if (!Arg3->getType()->isIntegerType()) {
S.Diag(TheCall->getArg(3)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "integer";
return true;
}
// check remaining common arguments.
Expr *Arg4 = TheCall->getArg(4);
Expr *Arg5 = TheCall->getArg(5);
// Fifth argument is always passed as a pointer to clk_event_t.
if (!Arg4->isNullPointerConstant(S.Context,
Expr::NPC_ValueDependentIsNotNull) &&
!Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
S.Diag(TheCall->getArg(4)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
return true;
}
// Sixth argument is always passed as a pointer to clk_event_t.
if (!Arg5->isNullPointerConstant(S.Context,
Expr::NPC_ValueDependentIsNotNull) &&
!(Arg5->getType()->isPointerType() &&
Arg5->getType()->getPointeeType()->isClkEventT())) {
S.Diag(TheCall->getArg(5)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
return true;
}
if (NumArgs == 7)
return false;
return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
}
// None of the specific cases has been detected; give a generic error.
S.Diag(TheCall->getBeginLoc(),
diag::err_opencl_enqueue_kernel_incorrect_args);
return true;
}
/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
return D->getAttr<OpenCLAccessAttr>();
}
/// Returns true if the first argument is not a pipe, or its access qualifier
/// does not match the read/write builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
const Expr *Arg0 = Call->getArg(0);
// First argument type should always be pipe.
if (!Arg0->getType()->isPipeType()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
<< Call->getDirectCallee() << Arg0->getSourceRange();
return true;
}
OpenCLAccessAttr *AccessQual =
getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
// Validates the access qualifier is compatible with the call.
// OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
// read_only and write_only, and assumed to be read_only if no qualifier is
// specified.
switch (Call->getDirectCallee()->getBuiltinID()) {
case Builtin::BIread_pipe:
case Builtin::BIreserve_read_pipe:
case Builtin::BIcommit_read_pipe:
case Builtin::BIwork_group_reserve_read_pipe:
case Builtin::BIsub_group_reserve_read_pipe:
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIsub_group_commit_read_pipe:
if (!(!AccessQual || AccessQual->isReadOnly())) {
S.Diag(Arg0->getBeginLoc(),
diag::err_opencl_builtin_pipe_invalid_access_modifier)
<< "read_only" << Arg0->getSourceRange();
return true;
}
break;
case Builtin::BIwrite_pipe:
case Builtin::BIreserve_write_pipe:
case Builtin::BIcommit_write_pipe:
case Builtin::BIwork_group_reserve_write_pipe:
case Builtin::BIsub_group_reserve_write_pipe:
case Builtin::BIwork_group_commit_write_pipe:
case Builtin::BIsub_group_commit_write_pipe:
if (!(AccessQual && AccessQual->isWriteOnly())) {
S.Diag(Arg0->getBeginLoc(),
diag::err_opencl_builtin_pipe_invalid_access_modifier)
<< "write_only" << Arg0->getSourceRange();
return true;
}
break;
default:
break;
}
return false;
}
/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
const Expr *Arg0 = Call->getArg(0);
const Expr *ArgIdx = Call->getArg(Idx);
const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
const QualType EltTy = PipeTy->getElementType();
const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
// The Idx argument should be a pointer and the type of the pointer and
// the type of pipe element should also be the same.
if (!ArgTy ||
!S.Context.hasSameType(
EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.getPointerType(EltTy)
<< ArgIdx->getType() << ArgIdx->getSourceRange();
return true;
}
return false;
}
// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
// OpenCL v2.0 s6.13.16.2 - The built-in read/write
// functions have two forms.
switch (Call->getNumArgs()) {
case 2:
if (checkOpenCLPipeArg(S, Call))
return true;
// The call with 2 arguments should be
// read/write_pipe(pipe T, T*).
// Check packet type T.
if (checkOpenCLPipePacketType(S, Call, 1))
return true;
break;
case 4: {
if (checkOpenCLPipeArg(S, Call))
return true;
// The call with 4 arguments should be
// read/write_pipe(pipe T, reserve_id_t, uint, T*).
// Check reserve_id_t.
if (!Call->getArg(1)->getType()->isReserveIDT()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.OCLReserveIDTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
}
// Check the index.
const Expr *Arg2 = Call->getArg(2);
if (!Arg2->getType()->isIntegerType() &&
!Arg2->getType()->isUnsignedIntegerType()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.UnsignedIntTy
<< Arg2->getType() << Arg2->getSourceRange();
return true;
}
// Check packet type T.
if (checkOpenCLPipePacketType(S, Call, 3))
return true;
} break;
default:
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
<< Call->getDirectCallee() << Call->getSourceRange();
return true;
}
return false;
}
// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
if (checkArgCount(S, Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
return true;
// Check the reserve size.
if (!Call->getArg(1)->getType()->isIntegerType() &&
!Call->getArg(1)->getType()->isUnsignedIntegerType()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.UnsignedIntTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
}
// Since the return type of the reserve_read/write_pipe built-in functions is
// reserve_id_t, which is not defined in the builtin def file, int is used as
// the return type there, and we need to override the return type of these
// calls here.
Call->setType(S.Context.OCLReserveIDTy);
return false;
}
// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
if (checkArgCount(S, Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
return true;
// Check reserve_id_t.
if (!Call->getArg(1)->getType()->isReserveIDT()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.OCLReserveIDTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
}
return false;
}
// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
if (checkArgCount(S, Call, 1))
return true;
if (!Call->getArg(0)->getType()->isPipeType()) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
<< Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
return true;
}
return false;
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
CallExpr *Call) {
if (checkArgCount(S, Call, 1))
return true;
auto RT = Call->getArg(0)->getType();
if (!RT->isPointerType() || RT->getPointeeType()
.getAddressSpace() == LangAS::opencl_constant) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
<< Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
return true;
}
if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
S.Diag(Call->getArg(0)->getBeginLoc(),
diag::warn_opencl_generic_address_space_arg)
<< Call->getDirectCallee()->getNameInfo().getAsString()
<< Call->getArg(0)->getSourceRange();
}
RT = RT->getPointeeType();
auto Qual = RT.getQualifiers();
switch (BuiltinID) {
case Builtin::BIto_global:
Qual.setAddressSpace(LangAS::opencl_global);
break;
case Builtin::BIto_local:
Qual.setAddressSpace(LangAS::opencl_local);
break;
case Builtin::BIto_private:
Qual.setAddressSpace(LangAS::opencl_private);
break;
default:
llvm_unreachable("Invalid builtin function");
}
Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
RT.getUnqualifiedType(), Qual)));
return false;
}
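/// Check the argument to __builtin_launder: it must be a pointer to a
/// complete object type (not a function pointer and not 'void *'), e.g.
/// (illustrative) __builtin_launder(&obj).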
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return ExprError();
// Compute __builtin_launder's parameter type from the argument.
// The parameter type is:
// * The type of the argument if it's not an array or function type,
// Otherwise,
// * The decayed argument type.
QualType ParamTy = [&]() {
QualType ArgTy = TheCall->getArg(0)->getType();
if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
return S.Context.getPointerType(Ty->getElementType());
if (ArgTy->isFunctionType()) {
return S.Context.getPointerType(ArgTy);
}
return ArgTy;
}();
TheCall->setType(ParamTy);
auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
if (!ParamTy->isPointerType())
return 0;
if (ParamTy->isFunctionPointerType())
return 1;
if (ParamTy->isVoidPointerType())
return 2;
return llvm::Optional<unsigned>{};
}();
if (DiagSelect.hasValue()) {
S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
<< DiagSelect.getValue() << TheCall->getSourceRange();
return ExprError();
}
// We either have an incomplete class type, or we have a class template
// whose instantiation has not been forced. Example:
//
// template <class T> struct Foo { T value; };
// Foo<int> *p = nullptr;
// auto *d = __builtin_launder(p);
if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
diag::err_incomplete_type))
return ExprError();
assert(ParamTy->getPointeeType()->isObjectType() &&
"Unhandled non-object pointer case");
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
ExprResult Arg =
S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
if (Arg.isInvalid())
return ExprError();
TheCall->setArg(0, Arg.get());
return TheCall;
}
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
llvm::Triple::ArchType CurArch =
S.getASTContext().getTargetInfo().getTriple().getArch();
if (llvm::is_contained(SupportedArchs, CurArch))
return false;
S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< TheCall->getSourceRange();
return true;
}
static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
SourceLocation CallSiteLoc);
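/// Dispatch checking of a target-specific builtin call to the handler for the
/// current target architecture; targets without a dedicated handler need no
/// extra checking.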
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall) {
switch (TI.getTriple().getArch()) {
default:
// Some builtins don't require additional checking, so just consider these
// acceptable.
return false;
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::hexagon:
return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::systemz:
return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::amdgcn:
return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
ExprResult TheCallResult(TheCall);
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
if (Error != ASTContext::GE_None)
ICEArguments = 0; // Don't diagnose previously diagnosed errors.
// If any arguments are required to be ICE's, check and diagnose.
for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
// Skip arguments not required to be ICE's.
if ((ICEArguments & (1 << ArgNo)) == 0) continue;
llvm::APSInt Result;
if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
return true;
ICEArguments &= ~(1 << ArgNo);
}
switch (BuiltinID) {
case Builtin::BI__builtin___CFStringMakeConstantString:
assert(TheCall->getNumArgs() == 1 &&
"Wrong # arguments to builtin CFStringMakeConstantString");
if (CheckObjCString(TheCall->getArg(0)))
return ExprError();
break;
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
case Builtin::BI__va_start: {
switch (Context.getTargetInfo().getTriple().getArch()) {
case llvm::Triple::aarch64:
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (SemaBuiltinVAStartARMMicrosoft(TheCall))
return ExprError();
break;
default:
if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
}
break;
}
// The acquire, release, and no fence variants are ARM and AArch64 only.
case Builtin::BI_interlockedbittestandset_acq:
case Builtin::BI_interlockedbittestandset_rel:
case Builtin::BI_interlockedbittestandset_nf:
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
if (CheckBuiltinTargetSupport(
*this, BuiltinID, TheCall,
{llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
break;
// The 64-bit bittest variants are x64, ARM, and AArch64 only.
case Builtin::BI_bittest64:
case Builtin::BI_bittestandcomplement64:
case Builtin::BI_bittestandreset64:
case Builtin::BI_bittestandset64:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
{llvm::Triple::x86_64, llvm::Triple::arm,
llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
break;
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered:
if (SemaBuiltinUnorderedCompare(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_fpclassify:
if (SemaBuiltinFPClassification(TheCall, 6))
return ExprError();
break;
case Builtin::BI__builtin_isfinite:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isinf_sign:
case Builtin::BI__builtin_isnan:
case Builtin::BI__builtin_isnormal:
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl:
if (SemaBuiltinFPClassification(TheCall, 1))
return ExprError();
break;
case Builtin::BI__builtin_shufflevector:
return SemaBuiltinShuffleVector(TheCall);
// TheCall will be freed by the smart pointer here, but that's fine, since
// SemaBuiltinShuffleVector guts it, but then doesn't release it.
case Builtin::BI__builtin_prefetch:
if (SemaBuiltinPrefetch(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_alloca_with_align:
if (SemaBuiltinAllocaWithAlign(TheCall))
return ExprError();
LLVM_FALLTHROUGH;
case Builtin::BI__builtin_alloca:
Diag(TheCall->getBeginLoc(), diag::warn_alloca)
<< TheCall->getDirectCallee();
break;
case Builtin::BI__arithmetic_fence:
if (SemaBuiltinArithmeticFence(TheCall))
return ExprError();
break;
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
if (SemaBuiltinAssume(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_assume_aligned:
if (SemaBuiltinAssumeAligned(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size:
if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
return ExprError();
break;
case Builtin::BI__builtin_longjmp:
if (SemaBuiltinLongjmp(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_setjmp:
if (SemaBuiltinSetjmp(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_classify_type:
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__builtin_complex:
if (SemaBuiltinComplex(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_constant_p: {
if (checkArgCount(*this, TheCall, 1)) return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid()) return true;
TheCall->setArg(0, Arg.get());
TheCall->setType(Context.IntTy);
break;
}
case Builtin::BI__builtin_launder:
return SemaBuiltinLaunder(*this, TheCall);
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
case Builtin::BI__sync_fetch_and_xor:
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_fetch_and_nand_1:
case Builtin::BI__sync_fetch_and_nand_2:
case Builtin::BI__sync_fetch_and_nand_4:
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_nand_and_fetch_1:
case Builtin::BI__sync_nand_and_fetch_2:
case Builtin::BI__sync_nand_and_fetch_4:
case Builtin::BI__sync_nand_and_fetch_8:
case Builtin::BI__sync_nand_and_fetch_16:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap_1:
case Builtin::BI__sync_bool_compare_and_swap_2:
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16:
case Builtin::BI__sync_swap:
case Builtin::BI__sync_swap_1:
case Builtin::BI__sync_swap_2:
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return SemaBuiltinAtomicOverloaded(TheCallResult);
case Builtin::BI__sync_synchronize:
Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
<< TheCall->getCallee()->getSourceRange();
break;
case Builtin::BI__builtin_nontemporal_load:
case Builtin::BI__builtin_nontemporal_store:
return SemaBuiltinNontemporalOverloaded(TheCallResult);
case Builtin::BI__builtin_memcpy_inline: {
clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about copying to or from `nullptr` pointers when `size` is
    // greater than 0. When `size` is value-dependent, we cannot evaluate its
    // value, so we bail out.
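    // Illustratively, a call such as
    //   __builtin_memcpy_inline(dst, /*src=*/nullptr, 4);
    // reaches the CheckNonNullArgument calls below because the size is a
    // non-zero constant, whereas a size of 0 skips the null-pointer warning.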
if (SizeOp->isValueDependent())
break;
if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
}
break;
}
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
case Builtin::BI__annotation:
if (SemaBuiltinMSVCAnnotation(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_annotation:
if (SemaBuiltinAnnotation(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_addressof:
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_is_aligned:
case Builtin::BI__builtin_align_up:
case Builtin::BI__builtin_align_down:
if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
case Builtin::BI__builtin_operator_delete: {
bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
ExprResult Res =
SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
if (Res.isInvalid())
CorrectDelayedTyposInExpr(TheCallResult.get());
return Res;
}
case Builtin::BI__builtin_dump_struct: {
    // First, ensure that we are called with exactly 2 arguments.
if (checkArgCount(*this, TheCall, 2))
return ExprError();
    // Ensure that the first argument is a pointer to a record type
    // ('struct XX *').
const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
const QualType PtrArgType = PtrArg->getType();
if (!PtrArgType->isPointerType() ||
!PtrArgType->getPointeeType()->isRecordType()) {
Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
<< "structure pointer";
return ExprError();
}
    // Ensure that the second argument is a pointer to a function.
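    // The checks below require a printf-style callback: a variadic function
    // returning 'int' whose first parameter is 'const char *' (printf itself
    // fits this shape).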
const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
const QualType FnPtrArgType = FnPtrArg->getType();
if (!FnPtrArgType->isPointerType()) {
Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
<< FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
const auto *FuncType =
FnPtrArgType->getPointeeType()->getAs<FunctionType>();
if (!FuncType) {
Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
<< FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
if (!FT->getNumParams()) {
Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
<< 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
QualType PT = FT->getParamType(0);
if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
!PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
!PT->getPointeeType().isConstQualified()) {
Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
<< 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
}
TheCall->setType(Context.IntTy);
break;
}
case Builtin::BI__builtin_expect_with_probability: {
    // First, ensure that we are called with exactly 3 arguments.
if (checkArgCount(*this, TheCall, 3))
return ExprError();
    // Then check that the probability is a constant float in the range
    // [0.0, 1.0].
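    // Illustratively, __builtin_expect_with_probability(x, 1, 0.9) passes
    // these checks, while a probability of 1.5 or a non-constant value is
    // rejected.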
const Expr *ProbArg = TheCall->getArg(2);
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
Eval.Diag = &Notes;
if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
!Eval.Val.isFloat()) {
Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
<< ProbArg->getSourceRange();
for (const PartialDiagnosticAt &PDiag : Notes)
Diag(PDiag.first, PDiag.second);
return ExprError();
}
llvm::APFloat Probability = Eval.Val.getFloat();
bool LoseInfo = false;
Probability.convert(llvm::APFloat::IEEEdouble(),
llvm::RoundingMode::Dynamic, &LoseInfo);
if (!(Probability >= llvm::APFloat(0.0) &&
Probability <= llvm::APFloat(1.0))) {
Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
<< ProbArg->getSourceRange();
return ExprError();
}
break;
}
case Builtin::BI__builtin_preserve_access_index:
if (SemaBuiltinPreserveAI(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_call_with_static_chain:
if (SemaBuiltinCallWithStaticChain(*this, TheCall))
return ExprError();
break;
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
diag::err_seh___except_block))
return ExprError();
break;
case Builtin::BI__exception_info:
case Builtin::BI_exception_info:
if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
diag::err_seh___except_filter))
return ExprError();
break;
case Builtin::BI__GetExceptionInfo:
if (checkArgCount(*this, TheCall, 1))
return ExprError();
if (CheckCXXThrowOperand(
TheCall->getBeginLoc(),
Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
TheCall))
return ExprError();
TheCall->setType(Context.VoidPtrTy);
break;
// OpenCL v2.0, s6.13.16 - Pipe functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe:
    // Since these two functions are declared with varargs, we need a semantic
    // check for the argument.
if (SemaBuiltinRWPipe(*this, TheCall))
return ExprError();
break;
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
case Builtin::BIwork_group_reserve_read_pipe:
case Builtin::BIwork_group_reserve_write_pipe:
if (SemaBuiltinReserveRWPipe(*this, TheCall))
return ExprError();
break;
case Builtin::BIsub_group_reserve_read_pipe:
case Builtin::BIsub_group_reserve_write_pipe:
if (checkOpenCLSubgroupExt(*this, TheCall) ||
SemaBuiltinReserveRWPipe(*this, TheCall))
return ExprError();
break;
case Builtin::BIcommit_read_pipe:
case Builtin::BIcommit_write_pipe:
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIwork_group_commit_write_pipe:
if (SemaBuiltinCommitRWPipe(*this, TheCall))
return ExprError();
break;
case Builtin::BIsub_group_commit_read_pipe:
case Builtin::BIsub_group_commit_write_pipe:
if (checkOpenCLSubgroupExt(*this, TheCall) ||
SemaBuiltinCommitRWPipe(*this, TheCall))
return ExprError();
break;
case Builtin::BIget_pipe_num_packets:
case Builtin::BIget_pipe_max_packets:
if (SemaBuiltinPipePackets(*this, TheCall))
return ExprError();
break;
case Builtin::BIto_global:
case Builtin::BIto_local:
case Builtin::BIto_private:
if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
return ExprError();
break;
// OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
case Builtin::BIenqueue_kernel:
if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
return ExprError();
break;
case Builtin::BIget_kernel_work_group_size:
case Builtin::BIget_kernel_preferred_work_group_size_multiple:
if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
return ExprError();
break;
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange:
if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_os_log_format:
Cleanup.setExprNeedsCleanups(true);
LLVM_FALLTHROUGH;
case Builtin::BI__builtin_os_log_format_buffer_size:
if (SemaBuiltinOSLogFormat(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_frame_address:
case Builtin::BI__builtin_return_address: {
if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
return ExprError();
    // Emit a -Wframe-address warning if a non-zero argument is passed to
    // __builtin_return_address or __builtin_frame_address.
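    // For example, __builtin_return_address(1) triggers this warning, while
    // __builtin_return_address(0) does not.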
Expr::EvalResult Result;
if (!TheCall->getArg(0)->isValueDependent() &&
TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
Result.Val.getInt() != 0)
Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
<< ((BuiltinID == Builtin::BI__builtin_return_address)
? "__builtin_return_address"
: "__builtin_frame_address")
<< TheCall->getSourceRange();
break;
}
case Builtin::BI__builtin_elementwise_abs:
if (SemaBuiltinElementwiseMathOneArg(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_elementwise_min:
case Builtin::BI__builtin_elementwise_max:
if (SemaBuiltinElementwiseMath(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_reduce_max:
case Builtin::BI__builtin_reduce_min:
if (SemaBuiltinReduceMath(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_matrix_transpose:
return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
case Builtin::BI__builtin_matrix_column_major_load:
return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
case Builtin::BI__builtin_matrix_column_major_store:
return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
case Builtin::BI__builtin_get_device_side_mangled_name: {
auto Check = [](CallExpr *TheCall) {
if (TheCall->getNumArgs() != 1)
return false;
auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
if (!DRE)
return false;
auto *D = DRE->getDecl();
if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
return false;
return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
};
if (!Check(TheCall)) {
Diag(TheCall->getBeginLoc(),
diag::err_hip_invalid_args_builtin_mangled_name);
return ExprError();
}
}
}
  // Since the target-specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
assert(Context.getAuxTargetInfo() &&
"Aux Target Builtin, but not an aux target?");
if (CheckTSBuiltinFunctionCall(
*Context.getAuxTargetInfo(),
Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
return ExprError();
} else {
if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
TheCall))
return ExprError();
}
}
return TheCallResult;
}
// Get the valid immediate range for the specified NEON type code.
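// As a worked example: for Int8 with IsQuad == 1, the lane-index bound below
// is (8 << 1) - 1 = 15 (the last lane of a 16 x i8 vector), while with
// shift == true the bound is the element width minus one (7 for i8).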
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
NeonTypeFlags Type(t);
int IsQuad = ForceQuad ? true : Type.isQuad();
switch (Type.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
return shift ? 7 : (8 << IsQuad) - 1;
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
return shift ? 15 : (4 << IsQuad) - 1;
case NeonTypeFlags::Int32:
return shift ? 31 : (2 << IsQuad) - 1;
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
return shift ? 63 : (1 << IsQuad) - 1;
case NeonTypeFlags::Poly128:
return shift ? 127 : (1 << IsQuad) - 1;
case NeonTypeFlags::Float16:
assert(!shift && "cannot shift float types!");
return (4 << IsQuad) - 1;
case NeonTypeFlags::Float32:
assert(!shift && "cannot shift float types!");
return (2 << IsQuad) - 1;
case NeonTypeFlags::Float64:
assert(!shift && "cannot shift float types!");
return (1 << IsQuad) - 1;
case NeonTypeFlags::BFloat16:
assert(!shift && "cannot shift float types!");
return (4 << IsQuad) - 1;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
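/// For instance, Int16 maps to 'unsigned short' when the unsigned flag is set
/// and to 'short' otherwise, while Int64 maps to 'long' or 'long long'
/// depending on IsInt64Long.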
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
bool IsPolyUnsigned, bool IsInt64Long) {
switch (Flags.getEltType()) {
case NeonTypeFlags::Int8:
return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
case NeonTypeFlags::Int16:
return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
case NeonTypeFlags::Int32:
return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
case NeonTypeFlags::Int64:
if (IsInt64Long)
return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
else
return Flags.isUnsigned() ? Context.UnsignedLongLongTy
: Context.LongLongTy;
case NeonTypeFlags::Poly8:
return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
case NeonTypeFlags::Poly16:
return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
case NeonTypeFlags::Poly64:
if (IsInt64Long)
return Context.UnsignedLongTy;
else
return Context.UnsignedLongLongTy;
case NeonTypeFlags::Poly128:
break;
case NeonTypeFlags::Float16:
return Context.HalfTy;
case NeonTypeFlags::Float32:
return Context.FloatTy;
case NeonTypeFlags::Float64:
return Context.DoubleTy;
case NeonTypeFlags::BFloat16:
return Context.BFloat16Ty;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// Range check SVE intrinsics that take immediate values.
SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
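  // Each ImmChecks entry is an (argument index, check kind, element size in
  // bits) tuple. For instance, ImmCheckShiftRight with a 64-bit element size
  // accepts immediates in the range [1, 64] (see the handling below).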
switch (BuiltinID) {
default:
return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
}
// Perform all the immediate checks for this builtin call.
bool HasError = false;
for (auto &I : ImmChecks) {
int ArgNum, CheckTy, ElementSizeInBits;
std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
typedef bool(*OptionSetCheckFnTy)(int64_t Value);
// Function that checks whether the operand (ArgNum) is an immediate
// that is one of the predefined values.
auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
int ErrDiag) -> bool {
// We can't check the value of a dependent argument.
Expr *Arg = TheCall->getArg(ArgNum);
if (Arg->isTypeDependent() || Arg->isValueDependent())
return false;
// Check constant-ness first.
llvm::APSInt Imm;
if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
return true;
if (!CheckImm(Imm.getSExtValue()))
return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
return false;
};
switch ((SVETypeFlags::ImmCheckType)CheckTy) {
case SVETypeFlags::ImmCheck0_31:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
HasError = true;
break;
case SVETypeFlags::ImmCheck0_13:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
HasError = true;
break;
case SVETypeFlags::ImmCheck1_16:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
HasError = true;
break;
case SVETypeFlags::ImmCheck0_7:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
HasError = true;
break;
case SVETypeFlags::ImmCheckExtract:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
(2048 / ElementSizeInBits) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftRight:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftRightNarrow:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
ElementSizeInBits / 2))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftLeft:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
ElementSizeInBits - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndex:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
(128 / (1 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndexCompRotate:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
(128 / (2 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndexDot:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
(128 / (4 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckComplexRot90_270:
if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
diag::err_rotation_argument_to_cadd))
HasError = true;
break;
case SVETypeFlags::ImmCheckComplexRotAll90:
if (CheckImmediateInSet(
[](int64_t V) {
return V == 0 || V == 90 || V == 180 || V == 270;
},
diag::err_rotation_argument_to_cmla))
HasError = true;
break;
case SVETypeFlags::ImmCheck0_1:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
HasError = true;
break;
case SVETypeFlags::ImmCheck0_2:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
HasError = true;
break;
case SVETypeFlags::ImmCheck0_3:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
HasError = true;
break;
}
}
return HasError;
}
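/// Perform semantic checking for a NEON builtin call: resolve the overloaded
/// vector element type from the type-code immediate, check that any pointer
/// argument has the matching element type, and range check the instruction's
/// immediate operands.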
bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall) {
llvm::APSInt Result;
uint64_t mask = 0;
unsigned TV = 0;
int PtrArgNum = -1;
bool HasConstPtr = false;
switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
}
  // For NEON intrinsics that are overloaded on vector element type, validate
  // the immediate that specifies which variant to emit.
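  // The last argument encodes the NeonTypeFlags value; 'mask' has one bit set
  // for each element type the intrinsic supports, so a type code whose bit is
  // clear (or which exceeds 63) is rejected.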
  unsigned ImmArg = TheCall->getNumArgs() - 1;
if (mask) {
if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
return true;
TV = Result.getLimitedValue(64);
if ((TV > 63) || (mask & (1ULL << TV)) == 0)
return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
<< TheCall->getArg(ImmArg)->getSourceRange();
}
if (PtrArgNum >= 0) {
// Check that pointer arguments have the specified type.
Expr *Arg = TheCall->getArg(PtrArgNum);
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
Arg = ICE->getSubExpr();
ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
QualType RHSTy = RHS.get()->getType();
llvm::Triple::ArchType Arch = TI.getTriple().getArch();
bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be;
bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
QualType EltTy =
getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
if (HasConstPtr)
EltTy = EltTy.withConst();
QualType LHSTy = Context.getPointerType(EltTy);
AssignConvertType ConvTy;
ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
if (RHS.isInvalid())
return true;
if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
RHS.get(), AA_Assigning))
return true;
}
  // For NEON intrinsics that take an immediate value as part of the
  // instruction, range check the immediate here.
unsigned i = 0, l = 0, u = 0;
switch (BuiltinID) {
default:
return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
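/// Perform the generated semantic checks for an MVE builtin call; returns true
/// if an error was diagnosed.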
bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
default:
return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
}
}
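/// Perform the generated semantic checks for a CDE builtin call and verify
/// that its coprocessor argument (argument 0) refers to a CDE-enabled
/// coprocessor.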
bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall) {
bool Err = false;
switch (BuiltinID) {
default:
return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
}
if (Err)
return true;
return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}
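/// Check that the given coprocessor argument is an immediate naming the
/// expected kind of coprocessor: one covered by the target's CDE coprocessor
/// mask when WantCDE is true, and a non-CDE coprocessor otherwise.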
bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
const Expr *CoprocArg, bool WantCDE) {
if (isConstantEvaluated())
return false;
// We can't check the value of a dependent argument.
if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
return false;
llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
int64_t CoprocNo = CoprocNoAP.getExtValue();
assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
if (IsCDECoproc != WantCDE)
return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
<< (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
return false;
}
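/// Perform semantic checks for the ARM/AArch64 exclusive load/store builtins
/// (__builtin_arm_ldrex, ldaex, strex and stlex): validate the pointer
/// operand, insert the required qualification casts, and set the call's
/// result type.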
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex ||
BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) &&
"unexpected ARM builtin");
bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex;
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
// Ensure that we have the proper number of arguments.
if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
return true;
// Inspect the pointer argument of the atomic builtin. This should always be
// a pointer type, whose element is an integral scalar or pointer type.
// Because it is a pointer type, we don't have to worry about any implicit
// casts here.
Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
if (PointerArgRes.isInvalid())
return true;
PointerArg = PointerArgRes.get();
const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
<< PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
// ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
// task is to insert the appropriate casts into the AST. First work out just
// what the appropriate type is.
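  // Illustratively, for __builtin_arm_ldrex(p) with 'int *p', the pointer
  // operand is cast to 'const volatile int *' below; for strex/stlex only
  // 'volatile' is added.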
QualType ValType = pointerType->getPointeeType();
QualType AddrType = ValType.getUnqualifiedType().withVolatile();
if (IsLdrex)
AddrType.addConst();
// Issue a warning if the cast is dodgy.
CastKind CastNeeded = CK_NoOp;
if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
CastNeeded = CK_BitCast;
Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
<< PointerArg->getType() << Context.getPointerType(AddrType)
<< AA_Passing << PointerArg->getSourceRange();
}
// Finally, do the cast and replace the argument with the corrected version.
AddrType = Context.getPointerType(AddrType);
PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
if (PointerArgRes.isInvalid())
return true;
PointerArg = PointerArgRes.get();
TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
// In general, we allow ints, floats and pointers to be loaded and stored.
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType() && !ValType->isFloatingType()) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
<< PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
// But ARM doesn't have instructions to deal with 128-bit versions.
if (Context.getTypeSize(ValType) > MaxWidth) {
assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
<< PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
switch (ValType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
// okay
break;
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
<< ValType << PointerArg->getSourceRange();
return true;
}
if (IsLdrex) {
TheCall->setType(ValType);
return false;
}
// Initialize the argument to be stored.
ExprResult ValArg = TheCall->getArg(0);
InitializedEntity Entity = InitializedEntity::InitializeParameter(
Context, ValType, /*consume*/ false);
ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
if (ValArg.isInvalid())
return true;
TheCall->setArg(0, ValArg.get());
// __builtin_arm_strex always returns an int. It's marked as such in the .def,
// but the custom checker bypasses all default analysis.
TheCall->setType(Context.IntTy);
return false;
}
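/// Top-level semantic checking for ARM builtin calls: dispatch the exclusive
/// load/store, NEON, MVE and CDE builtins to their dedicated checkers and
/// range check the remaining immediate operands here.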
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex) {
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
}
if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
}
if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsr64)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
if (BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsr ||
BuiltinID == ARM::BI__builtin_arm_wsrp)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
  // For intrinsics that take an immediate value as part of the instruction,
  // range check them here.
// FIXME: VFP Intrinsics should error if VFP not present.
switch (BuiltinID) {
default: return false;
case ARM::BI__builtin_arm_ssat:
return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange