| //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file implements extra semantic analysis beyond what is enforced |
| // by the C type system. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "clang/AST/APValue.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/Attr.h" |
| #include "clang/AST/AttrIterator.h" |
| #include "clang/AST/CharUnits.h" |
| #include "clang/AST/Decl.h" |
| #include "clang/AST/DeclBase.h" |
| #include "clang/AST/DeclCXX.h" |
| #include "clang/AST/DeclObjC.h" |
| #include "clang/AST/DeclarationName.h" |
| #include "clang/AST/EvaluatedExprVisitor.h" |
| #include "clang/AST/Expr.h" |
| #include "clang/AST/ExprCXX.h" |
| #include "clang/AST/ExprObjC.h" |
| #include "clang/AST/ExprOpenMP.h" |
| #include "clang/AST/NSAPI.h" |
| #include "clang/AST/OperationKinds.h" |
| #include "clang/AST/Stmt.h" |
| #include "clang/AST/TemplateBase.h" |
| #include "clang/AST/Type.h" |
| #include "clang/AST/TypeLoc.h" |
| #include "clang/AST/UnresolvedSet.h" |
| #include "clang/Analysis/Analyses/FormatString.h" |
| #include "clang/Basic/AddressSpaces.h" |
| #include "clang/Basic/CharInfo.h" |
| #include "clang/Basic/Diagnostic.h" |
| #include "clang/Basic/IdentifierTable.h" |
| #include "clang/Basic/LLVM.h" |
| #include "clang/Basic/LangOptions.h" |
| #include "clang/Basic/OpenCLOptions.h" |
| #include "clang/Basic/OperatorKinds.h" |
| #include "clang/Basic/PartialDiagnostic.h" |
| #include "clang/Basic/SourceLocation.h" |
| #include "clang/Basic/SourceManager.h" |
| #include "clang/Basic/Specifiers.h" |
| #include "clang/Basic/SyncScope.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetCXXABI.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/Basic/TypeTraits.h" |
| #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. |
| #include "clang/Sema/Initialization.h" |
| #include "clang/Sema/Lookup.h" |
| #include "clang/Sema/Ownership.h" |
| #include "clang/Sema/Scope.h" |
| #include "clang/Sema/ScopeInfo.h" |
| #include "clang/Sema/Sema.h" |
| #include "clang/Sema/SemaInternal.h" |
| #include "llvm/ADT/APFloat.h" |
| #include "llvm/ADT/APInt.h" |
| #include "llvm/ADT/APSInt.h" |
| #include "llvm/ADT/ArrayRef.h" |
| #include "llvm/ADT/DenseMap.h" |
| #include "llvm/ADT/FoldingSet.h" |
| #include "llvm/ADT/None.h" |
| #include "llvm/ADT/Optional.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SmallBitVector.h" |
| #include "llvm/ADT/SmallPtrSet.h" |
| #include "llvm/ADT/SmallString.h" |
| #include "llvm/ADT/SmallVector.h" |
| #include "llvm/ADT/StringRef.h" |
| #include "llvm/ADT/StringSwitch.h" |
| #include "llvm/ADT/Triple.h" |
| #include "llvm/Support/AtomicOrdering.h" |
| #include "llvm/Support/Casting.h" |
| #include "llvm/Support/Compiler.h" |
| #include "llvm/Support/ConvertUTF.h" |
| #include "llvm/Support/ErrorHandling.h" |
| #include "llvm/Support/Format.h" |
| #include "llvm/Support/Locale.h" |
| #include "llvm/Support/MathExtras.h" |
| #include "llvm/Support/raw_ostream.h" |
| #include <algorithm> |
| #include <cassert> |
| #include <cstddef> |
| #include <cstdint> |
| #include <functional> |
| #include <limits> |
| #include <string> |
| #include <tuple> |
| #include <utility> |
| |
| using namespace clang; |
| using namespace sema; |
| |
| SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, |
| unsigned ByteNo) const { |
| return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, |
| Context.getTargetInfo()); |
| } |
| |
| /// Checks that a call expression's argument count is the desired number. |
| /// This is useful when doing custom type-checking. Returns true on error. |
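| /// For illustration, callers in this file typically use it as: |
| ///   if (checkArgCount(S, TheCall, 2)) |
| ///     return true; |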
| static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { |
| unsigned argCount = call->getNumArgs(); |
| if (argCount == desiredArgCount) return false; |
| |
| if (argCount < desiredArgCount) |
| return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 /*function call*/ << desiredArgCount << argCount |
| << call->getSourceRange(); |
| |
| // Highlight all the excess arguments. |
| SourceRange range(call->getArg(desiredArgCount)->getLocStart(), |
| call->getArg(argCount - 1)->getLocEnd()); |
| |
| return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << desiredArgCount << argCount |
| << call->getArg(1)->getSourceRange(); |
| } |
| |
| /// Check that the first argument to __builtin_annotation is an integer |
| /// and the second argument is a non-wide string literal. |
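| /// For illustration only (variable names are hypothetical), a call that |
| /// passes these checks looks roughly like: |
| ///   int Tagged = __builtin_annotation(Value, "my annotation"); |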
| static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 2)) |
| return true; |
| |
| // First argument should be an integer. |
| Expr *ValArg = TheCall->getArg(0); |
| QualType Ty = ValArg->getType(); |
| if (!Ty->isIntegerType()) { |
| S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg) |
| << ValArg->getSourceRange(); |
| return true; |
| } |
| |
| // Second argument should be a constant string. |
| Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); |
| StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); |
| if (!Literal || !Literal->isAscii()) { |
| S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg) |
| << StrArg->getSourceRange(); |
| return true; |
| } |
| |
| TheCall->setType(Ty); |
| return false; |
| } |
| |
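| /// Check a call to the MSVC __annotation intrinsic, which takes one or more |
| /// wide string literal arguments. A sketch of a valid call (the strings are |
| /// illustrative only): |
| ///   __annotation(L"category", L"text"); |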
| static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { |
| // We need at least one argument. |
| if (TheCall->getNumArgs() < 1) { |
| S.Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) |
| << 0 << 1 << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return true; |
| } |
| |
| // All arguments should be wide string literals. |
| for (Expr *Arg : TheCall->arguments()) { |
| auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); |
| if (!Literal || !Literal->isWide()) { |
| S.Diag(Arg->getLocStart(), diag::err_msvc_annotation_wide_str) |
| << Arg->getSourceRange(); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /// Check that the argument to __builtin_addressof is a glvalue, and set the |
| /// result type to the corresponding pointer type. |
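| /// For illustration only (names are hypothetical): |
| ///   int Obj; |
| ///   int *P = __builtin_addressof(Obj); |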
| static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 1)) |
| return true; |
| |
| ExprResult Arg(TheCall->getArg(0)); |
| QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart()); |
| if (ResultType.isNull()) |
| return true; |
| |
| TheCall->setArg(0, Arg.get()); |
| TheCall->setType(ResultType); |
| return false; |
| } |
| |
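| /// Check a call to __builtin_{add,sub,mul}_overflow: the first two arguments |
| /// must be integers and the third a pointer to a non-const integer. A sketch |
| /// of a call that passes these checks (names are hypothetical): |
| ///   int A = 1, B = 2, Result; |
| ///   bool Overflowed = __builtin_add_overflow(A, B, &Result); |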
| static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 3)) |
| return true; |
| |
| // First two arguments should be integers. |
| for (unsigned I = 0; I < 2; ++I) { |
| Expr *Arg = TheCall->getArg(I); |
| QualType Ty = Arg->getType(); |
| if (!Ty->isIntegerType()) { |
| S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int) |
| << Ty << Arg->getSourceRange(); |
| return true; |
| } |
| } |
| |
| // Third argument should be a pointer to a non-const integer. |
| // IRGen correctly handles volatile, restrict, and address spaces, and |
| // the other qualifiers aren't possible. |
| { |
| Expr *Arg = TheCall->getArg(2); |
| QualType Ty = Arg->getType(); |
| const auto *PtrTy = Ty->getAs<PointerType>(); |
| if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() && |
| !PtrTy->getPointeeType().isConstQualified())) { |
| S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int) |
| << Ty << Arg->getSourceRange(); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
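| /// Warn when a checked memory/string builtin is guaranteed to overflow its |
| /// destination, i.e. when both the copy size and the destination size are |
| /// known at compile time and the former exceeds the latter. A sketch of a |
| /// call this would warn on (Src is a hypothetical source buffer): |
| ///   char Buf[4]; |
| ///   __builtin___memcpy_chk(Buf, Src, 16, __builtin_object_size(Buf, 0)); |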
| static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl, |
| CallExpr *TheCall, unsigned SizeIdx, |
| unsigned DstSizeIdx) { |
| if (TheCall->getNumArgs() <= SizeIdx || |
| TheCall->getNumArgs() <= DstSizeIdx) |
| return; |
| |
| const Expr *SizeArg = TheCall->getArg(SizeIdx); |
| const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx); |
| |
| llvm::APSInt Size, DstSize; |
| |
|   // Find out if both sizes are known at compile time. |
| if (!SizeArg->EvaluateAsInt(Size, S.Context) || |
| !DstSizeArg->EvaluateAsInt(DstSize, S.Context)) |
| return; |
| |
| if (Size.ule(DstSize)) |
| return; |
| |
|   // Confirmed overflow, so generate the diagnostic. |
| IdentifierInfo *FnName = FDecl->getIdentifier(); |
| SourceLocation SL = TheCall->getLocStart(); |
| SourceRange SR = TheCall->getSourceRange(); |
| |
| S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName; |
| } |
| |
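| /// Check a call to __builtin_call_with_static_chain: the first argument must |
| /// be a direct (non-block, non-builtin) call and the second a pointer that is |
| /// passed as the static chain. A sketch of a valid use (declarations are |
| /// hypothetical): |
| ///   extern int f(int); |
| ///   extern void *Chain; |
| ///   int R = __builtin_call_with_static_chain(f(42), Chain); |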
| static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { |
| if (checkArgCount(S, BuiltinCall, 2)) |
| return true; |
| |
| SourceLocation BuiltinLoc = BuiltinCall->getLocStart(); |
| Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); |
| Expr *Call = BuiltinCall->getArg(0); |
| Expr *Chain = BuiltinCall->getArg(1); |
| |
| if (Call->getStmtClass() != Stmt::CallExprClass) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| auto CE = cast<CallExpr>(Call); |
| if (CE->getCallee()->getType()->isBlockPointerType()) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| const Decl *TargetDecl = CE->getCalleeDecl(); |
| if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) |
| if (FD->getBuiltinID()) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| ExprResult ChainResult = S.UsualUnaryConversions(Chain); |
| if (ChainResult.isInvalid()) |
| return true; |
| if (!ChainResult.get()->getType()->isPointerType()) { |
| S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) |
| << Chain->getSourceRange(); |
| return true; |
| } |
| |
| QualType ReturnTy = CE->getCallReturnType(S.Context); |
| QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; |
| QualType BuiltinTy = S.Context.getFunctionType( |
| ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); |
| QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); |
| |
| Builtin = |
| S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); |
| |
| BuiltinCall->setType(CE->getType()); |
| BuiltinCall->setValueKind(CE->getValueKind()); |
| BuiltinCall->setObjectKind(CE->getObjectKind()); |
| BuiltinCall->setCallee(Builtin); |
| BuiltinCall->setArg(1, ChainResult.get()); |
| |
| return false; |
| } |
| |
| static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, |
| Scope::ScopeFlags NeededScopeFlags, |
| unsigned DiagID) { |
| // Scopes aren't available during instantiation. Fortunately, builtin |
| // functions cannot be template args so they cannot be formed through template |
| // instantiation. Therefore checking once during the parse is sufficient. |
| if (SemaRef.inTemplateInstantiation()) |
| return false; |
| |
| Scope *S = SemaRef.getCurScope(); |
| while (S && !S->isSEHExceptScope()) |
| S = S->getParent(); |
| if (!S || !(S->getFlags() & NeededScopeFlags)) { |
| auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| SemaRef.Diag(TheCall->getExprLoc(), DiagID) |
| << DRE->getDecl()->getIdentifier(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| static inline bool isBlockPointer(Expr *Arg) { |
| return Arg->getType()->isBlockPointerType(); |
| } |
| |
| /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local |
| /// void*, which is a requirement of device side enqueue. |
| static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { |
| const BlockPointerType *BPT = |
| cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); |
| ArrayRef<QualType> Params = |
| BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes(); |
| unsigned ArgCounter = 0; |
| bool IllegalParams = false; |
|   // Check each block parameter and diagnose any that is not a 'local void*', |
|   // since that is what device-side enqueue requires. |
| for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); |
| I != E; ++I, ++ArgCounter) { |
| if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || |
| (*I)->getPointeeType().getQualifiers().getAddressSpace() != |
| LangAS::opencl_local) { |
| // Get the location of the error. If a block literal has been passed |
| // (BlockExpr) then we can point straight to the offending argument, |
| // else we just point to the variable reference. |
| SourceLocation ErrorLoc; |
| if (isa<BlockExpr>(BlockArg)) { |
| BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); |
| ErrorLoc = BD->getParamDecl(ArgCounter)->getLocStart(); |
| } else if (isa<DeclRefExpr>(BlockArg)) { |
| ErrorLoc = cast<DeclRefExpr>(BlockArg)->getLocStart(); |
| } |
| S.Diag(ErrorLoc, |
| diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); |
| IllegalParams = true; |
| } |
| } |
| |
| return IllegalParams; |
| } |
| |
| static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { |
| if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_requires_extension) |
| << 1 << Call->getDirectCallee() << "cl_khr_subgroups"; |
| return true; |
| } |
| return false; |
| } |
| |
| static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 2)) |
| return true; |
| |
| if (checkOpenCLSubgroupExt(S, TheCall)) |
| return true; |
| |
| // First argument is an ndrange_t type. |
| Expr *NDRangeArg = TheCall->getArg(0); |
| if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { |
| S.Diag(NDRangeArg->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "'ndrange_t'"; |
| return true; |
| } |
| |
| Expr *BlockArg = TheCall->getArg(1); |
| if (!isBlockPointer(BlockArg)) { |
| S.Diag(BlockArg->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "block"; |
| return true; |
| } |
| return checkOpenCLBlockArgs(S, BlockArg); |
| } |
| |
| /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the |
| /// get_kernel_work_group_size |
| /// and get_kernel_preferred_work_group_size_multiple builtin functions. |
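| /// For illustration only, a call that would pass this check might look like: |
| ///   uint N = get_kernel_work_group_size(^(local void *P){ /* ... */ }); |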
| static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 1)) |
| return true; |
| |
| Expr *BlockArg = TheCall->getArg(0); |
| if (!isBlockPointer(BlockArg)) { |
| S.Diag(BlockArg->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "block"; |
| return true; |
| } |
| return checkOpenCLBlockArgs(S, BlockArg); |
| } |
| |
| /// Diagnose integer type and any valid implicit conversion to it. |
| static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, |
| const QualType &IntType); |
| |
| static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, |
| unsigned Start, unsigned End) { |
| bool IllegalParams = false; |
| for (unsigned I = Start; I <= End; ++I) |
| IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), |
| S.Context.getSizeType()); |
| return IllegalParams; |
| } |
| |
| /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all |
| /// 'local void*' parameters of the passed block. |
| static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, |
| Expr *BlockArg, |
| unsigned NumNonVarArgs) { |
| const BlockPointerType *BPT = |
| cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); |
| unsigned NumBlockParams = |
| BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams(); |
| unsigned TotalNumArgs = TheCall->getNumArgs(); |
| |
| // For each argument passed to the block, a corresponding uint needs to |
| // be passed to describe the size of the local memory. |
| if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { |
| S.Diag(TheCall->getLocStart(), |
| diag::err_opencl_enqueue_kernel_local_size_args); |
| return true; |
| } |
| |
| // Check that the sizes of the local memory are specified by integers. |
| return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, |
| TotalNumArgs - 1); |
| } |
| |
| /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different |
| /// overload formats specified in Table 6.13.17.1. |
| /// int enqueue_kernel(queue_t queue, |
| /// kernel_enqueue_flags_t flags, |
| /// const ndrange_t ndrange, |
| /// void (^block)(void)) |
| /// int enqueue_kernel(queue_t queue, |
| /// kernel_enqueue_flags_t flags, |
| /// const ndrange_t ndrange, |
| /// uint num_events_in_wait_list, |
| /// clk_event_t *event_wait_list, |
| /// clk_event_t *event_ret, |
| /// void (^block)(void)) |
| /// int enqueue_kernel(queue_t queue, |
| /// kernel_enqueue_flags_t flags, |
| /// const ndrange_t ndrange, |
| /// void (^block)(local void*, ...), |
| /// uint size0, ...) |
| /// int enqueue_kernel(queue_t queue, |
| /// kernel_enqueue_flags_t flags, |
| /// const ndrange_t ndrange, |
| /// uint num_events_in_wait_list, |
| /// clk_event_t *event_wait_list, |
| /// clk_event_t *event_ret, |
| /// void (^block)(local void*, ...), |
| /// uint size0, ...) |
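| /// For illustration, a call using the third form above might look like |
| /// (names are hypothetical): |
| ///   enqueue_kernel(Queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, Range, |
| ///                  ^(local void *P){ /* ... */ }, LocalBufSize); |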
| static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { |
| unsigned NumArgs = TheCall->getNumArgs(); |
| |
| if (NumArgs < 4) { |
| S.Diag(TheCall->getLocStart(), diag::err_typecheck_call_too_few_args); |
| return true; |
| } |
| |
| Expr *Arg0 = TheCall->getArg(0); |
| Expr *Arg1 = TheCall->getArg(1); |
| Expr *Arg2 = TheCall->getArg(2); |
| Expr *Arg3 = TheCall->getArg(3); |
| |
| // First argument always needs to be a queue_t type. |
| if (!Arg0->getType()->isQueueT()) { |
| S.Diag(TheCall->getArg(0)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << S.Context.OCLQueueTy; |
| return true; |
| } |
| |
| // Second argument always needs to be a kernel_enqueue_flags_t enum value. |
| if (!Arg1->getType()->isIntegerType()) { |
| S.Diag(TheCall->getArg(1)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; |
| return true; |
| } |
| |
| // Third argument is always an ndrange_t type. |
| if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { |
| S.Diag(TheCall->getArg(2)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "'ndrange_t'"; |
| return true; |
| } |
| |
| // With four arguments, there is only one form that the function could be |
| // called in: no events and no variable arguments. |
| if (NumArgs == 4) { |
|     // Check that the last argument is the right block type. |
| if (!isBlockPointer(Arg3)) { |
| S.Diag(Arg3->getLocStart(), diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "block"; |
| return true; |
| } |
|     // We have a block type; check the prototype. |
| const BlockPointerType *BPT = |
| cast<BlockPointerType>(Arg3->getType().getCanonicalType()); |
| if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) { |
| S.Diag(Arg3->getLocStart(), |
| diag::err_opencl_enqueue_kernel_blocks_no_args); |
| return true; |
| } |
| return false; |
| } |
|   // We can have a block + varargs. |
| if (isBlockPointer(Arg3)) |
| return (checkOpenCLBlockArgs(S, Arg3) || |
| checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4)); |
|   // The last two cases have either exactly 7 args, or 7 args followed by varargs. |
| if (NumArgs >= 7) { |
|     // Check the common block argument. |
| Expr *Arg6 = TheCall->getArg(6); |
| if (!isBlockPointer(Arg6)) { |
| S.Diag(Arg6->getLocStart(), diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "block"; |
| return true; |
| } |
| if (checkOpenCLBlockArgs(S, Arg6)) |
| return true; |
| |
|     // Fourth argument has to be any integer type. |
| if (!Arg3->getType()->isIntegerType()) { |
| S.Diag(TheCall->getArg(3)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() << "integer"; |
| return true; |
| } |
|     // Check the remaining common arguments. |
| Expr *Arg4 = TheCall->getArg(4); |
| Expr *Arg5 = TheCall->getArg(5); |
| |
| // Fifth argument is always passed as a pointer to clk_event_t. |
| if (!Arg4->isNullPointerConstant(S.Context, |
| Expr::NPC_ValueDependentIsNotNull) && |
| !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) { |
| S.Diag(TheCall->getArg(4)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() |
| << S.Context.getPointerType(S.Context.OCLClkEventTy); |
| return true; |
| } |
| |
| // Sixth argument is always passed as a pointer to clk_event_t. |
| if (!Arg5->isNullPointerConstant(S.Context, |
| Expr::NPC_ValueDependentIsNotNull) && |
| !(Arg5->getType()->isPointerType() && |
| Arg5->getType()->getPointeeType()->isClkEventT())) { |
| S.Diag(TheCall->getArg(5)->getLocStart(), |
| diag::err_opencl_builtin_expected_type) |
| << TheCall->getDirectCallee() |
| << S.Context.getPointerType(S.Context.OCLClkEventTy); |
| return true; |
| } |
| |
| if (NumArgs == 7) |
| return false; |
| |
| return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7); |
| } |
| |
|   // None of the specific cases was matched; give a generic error. |
| S.Diag(TheCall->getLocStart(), |
| diag::err_opencl_enqueue_kernel_incorrect_args); |
| return true; |
| } |
| |
| /// Returns the OpenCL access qualifier attribute of the given declaration, if any. |
| static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { |
| return D->getAttr<OpenCLAccessAttr>(); |
| } |
| |
| /// Returns true if the first argument is not a pipe, or if its access |
| /// qualifier is incompatible with the builtin being called. |
| static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) { |
| const Expr *Arg0 = Call->getArg(0); |
| // First argument type should always be pipe. |
| if (!Arg0->getType()->isPipeType()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg) |
| << Call->getDirectCallee() << Arg0->getSourceRange(); |
| return true; |
| } |
| OpenCLAccessAttr *AccessQual = |
| getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl()); |
| // Validates the access qualifier is compatible with the call. |
| // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be |
| // read_only and write_only, and assumed to be read_only if no qualifier is |
| // specified. |
| switch (Call->getDirectCallee()->getBuiltinID()) { |
| case Builtin::BIread_pipe: |
| case Builtin::BIreserve_read_pipe: |
| case Builtin::BIcommit_read_pipe: |
| case Builtin::BIwork_group_reserve_read_pipe: |
| case Builtin::BIsub_group_reserve_read_pipe: |
| case Builtin::BIwork_group_commit_read_pipe: |
| case Builtin::BIsub_group_commit_read_pipe: |
| if (!(!AccessQual || AccessQual->isReadOnly())) { |
| S.Diag(Arg0->getLocStart(), |
| diag::err_opencl_builtin_pipe_invalid_access_modifier) |
| << "read_only" << Arg0->getSourceRange(); |
| return true; |
| } |
| break; |
| case Builtin::BIwrite_pipe: |
| case Builtin::BIreserve_write_pipe: |
| case Builtin::BIcommit_write_pipe: |
| case Builtin::BIwork_group_reserve_write_pipe: |
| case Builtin::BIsub_group_reserve_write_pipe: |
| case Builtin::BIwork_group_commit_write_pipe: |
| case Builtin::BIsub_group_commit_write_pipe: |
| if (!(AccessQual && AccessQual->isWriteOnly())) { |
| S.Diag(Arg0->getLocStart(), |
| diag::err_opencl_builtin_pipe_invalid_access_modifier) |
| << "write_only" << Arg0->getSourceRange(); |
| return true; |
| } |
| break; |
| default: |
| break; |
| } |
| return false; |
| } |
| |
| /// Returns true if the pipe element type differs from the pointee type of |
| /// the packet pointer argument. |
| static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { |
| const Expr *Arg0 = Call->getArg(0); |
| const Expr *ArgIdx = Call->getArg(Idx); |
| const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); |
| const QualType EltTy = PipeTy->getElementType(); |
| const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); |
|   // The Idx argument should be a pointer, and its pointee type should match |
|   // the pipe element type. |
| if (!ArgTy || |
| !S.Context.hasSameType( |
| EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg) |
| << Call->getDirectCallee() << S.Context.getPointerType(EltTy) |
| << ArgIdx->getType() << ArgIdx->getSourceRange(); |
| return true; |
| } |
| return false; |
| } |
| |
| // \brief Performs semantic analysis for the read/write_pipe call. |
| // \param S Reference to the semantic analyzer. |
| // \param Call A pointer to the builtin call. |
| // \return True if a semantic error has been found, false otherwise. |
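| // For illustration, the two forms look roughly like (names are hypothetical): |
| //   int Val; |
| //   read_pipe(P, &Val);             // 2-argument form |
| //   read_pipe(P, Rid, Index, &Val); // 4-argument form, Rid is a reserve_id_t |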
| static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { |
| // OpenCL v2.0 s6.13.16.2 - The built-in read/write |
| // functions have two forms. |
| switch (Call->getNumArgs()) { |
| case 2: |
| if (checkOpenCLPipeArg(S, Call)) |
| return true; |
| // The call with 2 arguments should be |
| // read/write_pipe(pipe T, T*). |
| // Check packet type T. |
| if (checkOpenCLPipePacketType(S, Call, 1)) |
| return true; |
| break; |
| |
| case 4: { |
| if (checkOpenCLPipeArg(S, Call)) |
| return true; |
| // The call with 4 arguments should be |
| // read/write_pipe(pipe T, reserve_id_t, uint, T*). |
| // Check reserve_id_t. |
| if (!Call->getArg(1)->getType()->isReserveIDT()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg) |
| << Call->getDirectCallee() << S.Context.OCLReserveIDTy |
| << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); |
| return true; |
| } |
| |
| // Check the index. |
| const Expr *Arg2 = Call->getArg(2); |
| if (!Arg2->getType()->isIntegerType() && |
| !Arg2->getType()->isUnsignedIntegerType()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg) |
| << Call->getDirectCallee() << S.Context.UnsignedIntTy |
| << Arg2->getType() << Arg2->getSourceRange(); |
| return true; |
| } |
| |
| // Check packet type T. |
| if (checkOpenCLPipePacketType(S, Call, 3)) |
| return true; |
| } break; |
| default: |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_arg_num) |
| << Call->getDirectCallee() << Call->getSourceRange(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| // \brief Performs a semantic analysis on the |
| // {work_group_/sub_group_/}reserve_{read/write}_pipe builtins. |
| // \param S Reference to the semantic analyzer. |
| // \param Call The call to the builtin function to be analyzed. |
| // \return True if a semantic error was found, false otherwise. |
| static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { |
| if (checkArgCount(S, Call, 2)) |
| return true; |
| |
| if (checkOpenCLPipeArg(S, Call)) |
| return true; |
| |
| // Check the reserve size. |
| if (!Call->getArg(1)->getType()->isIntegerType() && |
| !Call->getArg(1)->getType()->isUnsignedIntegerType()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg) |
| << Call->getDirectCallee() << S.Context.UnsignedIntTy |
| << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); |
| return true; |
| } |
| |
|   // Since the return type of the reserve_read/write_pipe built-in functions is |
|   // reserve_id_t, which is not defined in the builtin def file, we use int as |
|   // the return type there and override it to reserve_id_t here. |
| Call->setType(S.Context.OCLReserveIDTy); |
| |
| return false; |
| } |
| |
| // \brief Performs a semantic analysis on the |
| // {work_group_/sub_group_/}commit_{read/write}_pipe builtins. |
| // \param S Reference to the semantic analyzer. |
| // \param Call The call to the builtin function to be analyzed. |
| // \return True if a semantic error was found, false otherwise. |
| static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { |
| if (checkArgCount(S, Call, 2)) |
| return true; |
| |
| if (checkOpenCLPipeArg(S, Call)) |
| return true; |
| |
| // Check reserve_id_t. |
| if (!Call->getArg(1)->getType()->isReserveIDT()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg) |
| << Call->getDirectCallee() << S.Context.OCLReserveIDTy |
| << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| // \brief Performs a semantic analysis on calls to the built-in pipe |
| // query functions. |
| // \param S Reference to the semantic analyzer. |
| // \param Call The call to the builtin function to be analyzed. |
| // \return True if a semantic error was found, false otherwise. |
| static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { |
| if (checkArgCount(S, Call, 1)) |
| return true; |
| |
| if (!Call->getArg(0)->getType()->isPipeType()) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg) |
| << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| // \brief OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
| // Performs semantic analysis for the to_global/local/private calls. |
| // \param S Reference to the semantic analyzer. |
| // \param BuiltinID ID of the builtin function. |
| // \param Call A pointer to the builtin call. |
| // \return True if a semantic error has been found, false otherwise. |
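| // For illustration only (GenericPtr is a hypothetical pointer into the |
| // generic address space): |
| //   global int *G = to_global(GenericPtr); // NULL if it is not global |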
| static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, |
| CallExpr *Call) { |
| if (Call->getNumArgs() != 1) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_arg_num) |
| << Call->getDirectCallee() << Call->getSourceRange(); |
| return true; |
| } |
| |
| auto RT = Call->getArg(0)->getType(); |
| if (!RT->isPointerType() || RT->getPointeeType() |
| .getAddressSpace() == LangAS::opencl_constant) { |
| S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_invalid_arg) |
| << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); |
| return true; |
| } |
| |
| RT = RT->getPointeeType(); |
| auto Qual = RT.getQualifiers(); |
| switch (BuiltinID) { |
| case Builtin::BIto_global: |
| Qual.setAddressSpace(LangAS::opencl_global); |
| break; |
| case Builtin::BIto_local: |
| Qual.setAddressSpace(LangAS::opencl_local); |
| break; |
| case Builtin::BIto_private: |
| Qual.setAddressSpace(LangAS::opencl_private); |
| break; |
| default: |
| llvm_unreachable("Invalid builtin function"); |
| } |
| Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( |
| RT.getUnqualifiedType(), Qual))); |
| |
| return false; |
| } |
| |
| ExprResult |
| Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, |
| CallExpr *TheCall) { |
| ExprResult TheCallResult(TheCall); |
| |
| // Find out if any arguments are required to be integer constant expressions. |
| unsigned ICEArguments = 0; |
| ASTContext::GetBuiltinTypeError Error; |
| Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); |
| if (Error != ASTContext::GE_None) |
| ICEArguments = 0; // Don't diagnose previously diagnosed errors. |
| |
| // If any arguments are required to be ICE's, check and diagnose. |
| for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { |
| // Skip arguments not required to be ICE's. |
| if ((ICEArguments & (1 << ArgNo)) == 0) continue; |
| |
| llvm::APSInt Result; |
| if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) |
| return true; |
| ICEArguments &= ~(1 << ArgNo); |
| } |
| |
| switch (BuiltinID) { |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| assert(TheCall->getNumArgs() == 1 && |
| "Wrong # arguments to builtin CFStringMakeConstantString"); |
| if (CheckObjCString(TheCall->getArg(0))) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_ms_va_start: |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| if (SemaBuiltinVAStart(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__va_start: { |
| switch (Context.getTargetInfo().getTriple().getArch()) { |
| case llvm::Triple::arm: |
| case llvm::Triple::thumb: |
| if (SemaBuiltinVAStartARMMicrosoft(TheCall)) |
| return ExprError(); |
| break; |
| default: |
| if (SemaBuiltinVAStart(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| } |
| break; |
| } |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: |
| if (SemaBuiltinUnorderedCompare(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_fpclassify: |
| if (SemaBuiltinFPClassification(TheCall, 6)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_isfinite: |
| case Builtin::BI__builtin_isinf: |
| case Builtin::BI__builtin_isinf_sign: |
| case Builtin::BI__builtin_isnan: |
| case Builtin::BI__builtin_isnormal: |
| if (SemaBuiltinFPClassification(TheCall, 1)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_shufflevector: |
| return SemaBuiltinShuffleVector(TheCall); |
| // TheCall will be freed by the smart pointer here, but that's fine, since |
| // SemaBuiltinShuffleVector guts it, but then doesn't release it. |
| case Builtin::BI__builtin_prefetch: |
| if (SemaBuiltinPrefetch(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_alloca_with_align: |
| if (SemaBuiltinAllocaWithAlign(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__assume: |
| case Builtin::BI__builtin_assume: |
| if (SemaBuiltinAssume(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_assume_aligned: |
| if (SemaBuiltinAssumeAligned(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_object_size: |
| if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_longjmp: |
| if (SemaBuiltinLongjmp(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_setjmp: |
| if (SemaBuiltinSetjmp(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI_setjmp: |
| case Builtin::BI_setjmpex: |
| if (checkArgCount(*this, TheCall, 1)) |
| return true; |
| break; |
| case Builtin::BI__builtin_classify_type: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__builtin_constant_p: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| case Builtin::BI__sync_fetch_and_nand: |
| case Builtin::BI__sync_fetch_and_nand_1: |
| case Builtin::BI__sync_fetch_and_nand_2: |
| case Builtin::BI__sync_fetch_and_nand_4: |
| case Builtin::BI__sync_fetch_and_nand_8: |
| case Builtin::BI__sync_fetch_and_nand_16: |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| case Builtin::BI__sync_nand_and_fetch: |
| case Builtin::BI__sync_nand_and_fetch_1: |
| case Builtin::BI__sync_nand_and_fetch_2: |
| case Builtin::BI__sync_nand_and_fetch_4: |
| case Builtin::BI__sync_nand_and_fetch_8: |
| case Builtin::BI__sync_nand_and_fetch_16: |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: |
| case Builtin::BI__sync_swap: |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| return SemaBuiltinAtomicOverloaded(TheCallResult); |
| case Builtin::BI__builtin_nontemporal_load: |
| case Builtin::BI__builtin_nontemporal_store: |
| return SemaBuiltinNontemporalOverloaded(TheCallResult); |
| #define BUILTIN(ID, TYPE, ATTRS) |
| #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ |
| case Builtin::BI##ID: \ |
| return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); |
| #include "clang/Basic/Builtins.def" |
| case Builtin::BI__annotation: |
| if (SemaBuiltinMSVCAnnotation(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_annotation: |
| if (SemaBuiltinAnnotation(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_addressof: |
| if (SemaBuiltinAddressof(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_add_overflow: |
| case Builtin::BI__builtin_sub_overflow: |
| case Builtin::BI__builtin_mul_overflow: |
| if (SemaBuiltinOverflow(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_operator_new: |
| case Builtin::BI__builtin_operator_delete: |
| if (!getLangOpts().CPlusPlus) { |
| Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language) |
| << (BuiltinID == Builtin::BI__builtin_operator_new |
| ? "__builtin_operator_new" |
| : "__builtin_operator_delete") |
| << "C++"; |
| return ExprError(); |
| } |
| // CodeGen assumes it can find the global new and delete to call, |
| // so ensure that they are declared. |
| DeclareGlobalNewDelete(); |
| break; |
| |
|   // Check secure string manipulation functions where overflows are |
|   // detectable at compile time. |
| case Builtin::BI__builtin___memcpy_chk: |
| case Builtin::BI__builtin___memmove_chk: |
| case Builtin::BI__builtin___memset_chk: |
| case Builtin::BI__builtin___strlcat_chk: |
| case Builtin::BI__builtin___strlcpy_chk: |
| case Builtin::BI__builtin___strncat_chk: |
| case Builtin::BI__builtin___strncpy_chk: |
| case Builtin::BI__builtin___stpncpy_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3); |
| break; |
| case Builtin::BI__builtin___memccpy_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4); |
| break; |
| case Builtin::BI__builtin___snprintf_chk: |
| case Builtin::BI__builtin___vsnprintf_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3); |
| break; |
| case Builtin::BI__builtin_call_with_static_chain: |
| if (SemaBuiltinCallWithStaticChain(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__exception_code: |
| case Builtin::BI_exception_code: |
| if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, |
| diag::err_seh___except_block)) |
| return ExprError(); |
| break; |
| case Builtin::BI__exception_info: |
| case Builtin::BI_exception_info: |
| if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, |
| diag::err_seh___except_filter)) |
| return ExprError(); |
| break; |
| case Builtin::BI__GetExceptionInfo: |
| if (checkArgCount(*this, TheCall, 1)) |
| return ExprError(); |
| |
| if (CheckCXXThrowOperand( |
| TheCall->getLocStart(), |
| Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), |
| TheCall)) |
| return ExprError(); |
| |
| TheCall->setType(Context.VoidPtrTy); |
| break; |
| // OpenCL v2.0, s6.13.16 - Pipe functions |
| case Builtin::BIread_pipe: |
| case Builtin::BIwrite_pipe: |
| // Since those two functions are declared with var args, we need a semantic |
| // check for the argument. |
| if (SemaBuiltinRWPipe(*this, TheCall)) |
| return ExprError(); |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BIreserve_read_pipe: |
| case Builtin::BIreserve_write_pipe: |
| case Builtin::BIwork_group_reserve_read_pipe: |
| case Builtin::BIwork_group_reserve_write_pipe: |
| if (SemaBuiltinReserveRWPipe(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIsub_group_reserve_read_pipe: |
| case Builtin::BIsub_group_reserve_write_pipe: |
| if (checkOpenCLSubgroupExt(*this, TheCall) || |
| SemaBuiltinReserveRWPipe(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIcommit_read_pipe: |
| case Builtin::BIcommit_write_pipe: |
| case Builtin::BIwork_group_commit_read_pipe: |
| case Builtin::BIwork_group_commit_write_pipe: |
| if (SemaBuiltinCommitRWPipe(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIsub_group_commit_read_pipe: |
| case Builtin::BIsub_group_commit_write_pipe: |
| if (checkOpenCLSubgroupExt(*this, TheCall) || |
| SemaBuiltinCommitRWPipe(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIget_pipe_num_packets: |
| case Builtin::BIget_pipe_max_packets: |
| if (SemaBuiltinPipePackets(*this, TheCall)) |
| return ExprError(); |
| TheCall->setType(Context.UnsignedIntTy); |
| break; |
| case Builtin::BIto_global: |
| case Builtin::BIto_local: |
| case Builtin::BIto_private: |
| if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. |
| case Builtin::BIenqueue_kernel: |
| if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIget_kernel_work_group_size: |
| case Builtin::BIget_kernel_preferred_work_group_size_multiple: |
| if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
| case Builtin::BIget_kernel_sub_group_count_for_ndrange: |
| if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_os_log_format: |
| case Builtin::BI__builtin_os_log_format_buffer_size: |
| if (SemaBuiltinOSLogFormat(TheCall)) |
| return ExprError(); |
| break; |
| } |
| |
| // Since the target specific builtins for each arch overlap, only check those |
| // of the arch we are compiling for. |
| if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { |
| switch (Context.getTargetInfo().getTriple().getArch()) { |
| case llvm::Triple::arm: |
| case llvm::Triple::armeb: |
| case llvm::Triple::thumb: |
| case llvm::Triple::thumbeb: |
| if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::aarch64: |
| case llvm::Triple::aarch64_be: |
| if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::mips: |
| case llvm::Triple::mipsel: |
| case llvm::Triple::mips64: |
| case llvm::Triple::mips64el: |
| if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::systemz: |
| if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::x86: |
| case llvm::Triple::x86_64: |
| if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::ppc: |
| case llvm::Triple::ppc64: |
| case llvm::Triple::ppc64le: |
| if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| return TheCallResult; |
| } |
| |
| // Get the valid immediate range for the specified NEON type code. |
| static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { |
| NeonTypeFlags Type(t); |
| int IsQuad = ForceQuad ? true : Type.isQuad(); |
| switch (Type.getEltType()) { |
| case NeonTypeFlags::Int8: |
| case NeonTypeFlags::Poly8: |
| return shift ? 7 : (8 << IsQuad) - 1; |
| case NeonTypeFlags::Int16: |
| case NeonTypeFlags::Poly16: |
| return shift ? 15 : (4 << IsQuad) - 1; |
| case NeonTypeFlags::Int32: |
| return shift ? 31 : (2 << IsQuad) - 1; |
| case NeonTypeFlags::Int64: |
| case NeonTypeFlags::Poly64: |
| return shift ? 63 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Poly128: |
| return shift ? 127 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Float16: |
| assert(!shift && "cannot shift float types!"); |
| return (4 << IsQuad) - 1; |
| case NeonTypeFlags::Float32: |
| assert(!shift && "cannot shift float types!"); |
| return (2 << IsQuad) - 1; |
| case NeonTypeFlags::Float64: |
| assert(!shift && "cannot shift float types!"); |
| return (1 << IsQuad) - 1; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
| |
| /// getNeonEltType - Return the QualType corresponding to the elements of |
| /// the vector type specified by the NeonTypeFlags. This is used to check |
| /// the pointer arguments for Neon load/store intrinsics. |
| static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, |
| bool IsPolyUnsigned, bool IsInt64Long) { |
| switch (Flags.getEltType()) { |
| case NeonTypeFlags::Int8: |
| return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Int16: |
| return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Int32: |
| return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; |
| case NeonTypeFlags::Int64: |
| if (IsInt64Long) |
| return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; |
| else |
| return Flags.isUnsigned() ? Context.UnsignedLongLongTy |
| : Context.LongLongTy; |
| case NeonTypeFlags::Poly8: |
| return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Poly16: |
| return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Poly64: |
| if (IsInt64Long) |
| return Context.UnsignedLongTy; |
| else |
| return Context.UnsignedLongLongTy; |
| case NeonTypeFlags::Poly128: |
| break; |
| case NeonTypeFlags::Float16: |
| return Context.HalfTy; |
| case NeonTypeFlags::Float32: |
| return Context.FloatTy; |
| case NeonTypeFlags::Float64: |
| return Context.DoubleTy; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
| |
| bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| llvm::APSInt Result; |
| uint64_t mask = 0; |
| unsigned TV = 0; |
| int PtrArgNum = -1; |
| bool HasConstPtr = false; |
| switch (BuiltinID) { |
| #define GET_NEON_OVERLOAD_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_OVERLOAD_CHECK |
| } |
| |
| // For NEON intrinsics which are overloaded on vector element type, validate |
| // the immediate which specifies which variant to emit. |
| unsigned ImmArg = TheCall->getNumArgs()-1; |
| if (mask) { |
| if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) |
| return true; |
| |
| TV = Result.getLimitedValue(64); |
| if ((TV > 63) || (mask & (1ULL << TV)) == 0) |
| return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code) |
| << TheCall->getArg(ImmArg)->getSourceRange(); |
| } |
| |
| if (PtrArgNum >= 0) { |
| // Check that pointer arguments have the specified type. |
| Expr *Arg = TheCall->getArg(PtrArgNum); |
| if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) |
| Arg = ICE->getSubExpr(); |
| ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); |
| QualType RHSTy = RHS.get()->getType(); |
| |
| llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); |
| bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || |
| Arch == llvm::Triple::aarch64_be; |
| bool IsInt64Long = |
| Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; |
| QualType EltTy = |
| getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); |
| if (HasConstPtr) |
| EltTy = EltTy.withConst(); |
| QualType LHSTy = Context.getPointerType(EltTy); |
| AssignConvertType ConvTy; |
| ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); |
| if (RHS.isInvalid()) |
| return true; |
| if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy, |
| RHS.get(), AA_Assigning)) |
| return true; |
| } |
| |
| // For NEON intrinsics which take an immediate value as part of the |
| // instruction, range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| #define GET_NEON_IMMEDIATE_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_IMMEDIATE_CHECK |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
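| /// Shared checking for the ARM and AArch64 exclusive load/store builtins. |
| /// A sketch of typical usage (Addr is a hypothetical 'int *'): |
| ///   int Old = __builtin_arm_ldrex(Addr); |
| ///   int Failed = __builtin_arm_strex(Old + 1, Addr); |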
| bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, |
| unsigned MaxWidth) { |
| assert((BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == ARM::BI__builtin_arm_strex || |
| BuiltinID == ARM::BI__builtin_arm_stlex || |
| BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_strex || |
| BuiltinID == AArch64::BI__builtin_arm_stlex) && |
| "unexpected ARM builtin"); |
| bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex; |
| |
| DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| |
| // Ensure that we have the proper number of arguments. |
| if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) |
| return true; |
| |
| // Inspect the pointer argument of the atomic builtin. This should always be |
| // a pointer type, whose element is an integral scalar or pointer type. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); |
| ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.get(); |
| |
| const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next |
| // task is to insert the appropriate casts into the AST. First work out just |
| // what the appropriate type is. |
| QualType ValType = pointerType->getPointeeType(); |
| QualType AddrType = ValType.getUnqualifiedType().withVolatile(); |
| if (IsLdrex) |
| AddrType.addConst(); |
| |
| // Issue a warning if the cast is dodgy. |
| CastKind CastNeeded = CK_NoOp; |
| if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { |
| CastNeeded = CK_BitCast; |
| Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers) |
| << PointerArg->getType() |
| << Context.getPointerType(AddrType) |
| << AA_Passing << PointerArg->getSourceRange(); |
| } |
| |
| // Finally, do the cast and replace the argument with the corrected version. |
| AddrType = Context.getPointerType(AddrType); |
| PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.get(); |
| |
| TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); |
| |
| // In general, we allow ints, floats and pointers to be loaded and stored. |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType() && !ValType->isFloatingType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // But ARM doesn't have instructions to deal with 128-bit versions. |
| if (Context.getTypeSize(ValType) > MaxWidth) { |
| assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); |
| Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| if (IsLdrex) { |
| TheCall->setType(ValType); |
| return false; |
| } |
| |
| // Initialize the argument to be stored. |
| ExprResult ValArg = TheCall->getArg(0); |
| InitializedEntity Entity = InitializedEntity::InitializeParameter( |
| Context, ValType, /*consume*/ false); |
| ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); |
| if (ValArg.isInvalid()) |
| return true; |
| TheCall->setArg(0, ValArg.get()); |
| |
| // __builtin_arm_strex always returns an int. It's marked as such in the .def, |
| // but the custom checker bypasses all default analysis. |
| TheCall->setType(Context.IntTy); |
| return false; |
| } |
| |
| bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == ARM::BI__builtin_arm_strex || |
| BuiltinID == ARM::BI__builtin_arm_stlex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); |
| } |
| |
| if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); |
| } |
| |
| if (BuiltinID == ARM::BI__builtin_arm_rsr64 || |
| BuiltinID == ARM::BI__builtin_arm_wsr64) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); |
| |
| if (BuiltinID == ARM::BI__builtin_arm_rsr || |
| BuiltinID == ARM::BI__builtin_arm_rsrp || |
| BuiltinID == ARM::BI__builtin_arm_wsr || |
| BuiltinID == ARM::BI__builtin_arm_wsrp) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| // For intrinsics that take an immediate value as part of the instruction, |
| // range check it here. |
| // FIXME: VFP Intrinsics should error if VFP not present. |
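| // For example (illustrative), __builtin_arm_ssat(x, 0) is rejected because |
| // the signed saturation position must be in [1, 32], while |
| // __builtin_arm_usat(x, 0) is accepted since its range is [0, 31]. |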
| switch (BuiltinID) { |
| default: return false; |
| case ARM::BI__builtin_arm_ssat: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); |
| case ARM::BI__builtin_arm_usat: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); |
| case ARM::BI__builtin_arm_ssat16: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); |
| case ARM::BI__builtin_arm_usat16: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); |
| case ARM::BI__builtin_arm_vcvtr_f: |
| case ARM::BI__builtin_arm_vcvtr_d: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); |
| case ARM::BI__builtin_arm_dmb: |
| case ARM::BI__builtin_arm_dsb: |
| case ARM::BI__builtin_arm_isb: |
| case ARM::BI__builtin_arm_dbg: |
| return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); |
| } |
| } |
| |
| bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_strex || |
| BuiltinID == AArch64::BI__builtin_arm_stlex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); |
| } |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || |
| SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); |
| } |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
| BuiltinID == AArch64::BI__builtin_arm_wsr64) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
| BuiltinID == AArch64::BI__builtin_arm_rsrp || |
| BuiltinID == AArch64::BI__builtin_arm_wsr || |
| BuiltinID == AArch64::BI__builtin_arm_wsrp) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| // For intrinsics that take an immediate value as part of the instruction, |
| // range check it here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case AArch64::BI__builtin_arm_dmb: |
| case AArch64::BI__builtin_arm_dsb: |
| case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; |
| } |
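| // Illustrative: __builtin_arm_dmb(16) is rejected by the range check below; |
| // only barrier options 0-15 are encodable. |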
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
| // CheckMipsBuiltinFunctionCall - Checks that the constant value passed to |
| // the intrinsic is correct. The switch statement is ordered by DSP, MSA. The |
| // ordering for DSP is unspecified. MSA is ordered by the data format used |
| // by the underlying instruction, i.e., df/m, df/n, and then by size. |
| // |
| // FIXME: The size tests here should instead be tablegen'd along with the |
| // definitions from include/clang/Basic/BuiltinsMips.def. |
| // FIXME: GCC is strict on signedness for some of these intrinsics, we should |
| // be too. |
| bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0, m = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; |
| case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; |
| // MSA intrinsics. Instructions (which the intrinsics map to) that use the |
| // df/m field. |
| // These intrinsics take an unsigned 3 bit immediate. |
| case Mips::BI__builtin_msa_bclri_b: |
| case Mips::BI__builtin_msa_bnegi_b: |
| case Mips::BI__builtin_msa_bseti_b: |
| case Mips::BI__builtin_msa_sat_s_b: |
| case Mips::BI__builtin_msa_sat_u_b: |
| case Mips::BI__builtin_msa_slli_b: |
| case Mips::BI__builtin_msa_srai_b: |
| case Mips::BI__builtin_msa_srari_b: |
| case Mips::BI__builtin_msa_srli_b: |
| case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; |
| case Mips::BI__builtin_msa_binsli_b: |
| case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; |
| // These intrinsics take an unsigned 4 bit immediate. |
| case Mips::BI__builtin_msa_bclri_h: |
| case Mips::BI__builtin_msa_bnegi_h: |
| case Mips::BI__builtin_msa_bseti_h: |
| case Mips::BI__builtin_msa_sat_s_h: |
| case Mips::BI__builtin_msa_sat_u_h: |
| case Mips::BI__builtin_msa_slli_h: |
| case Mips::BI__builtin_msa_srai_h: |
| case Mips::BI__builtin_msa_srari_h: |
| case Mips::BI__builtin_msa_srli_h: |
| case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; |
| case Mips::BI__builtin_msa_binsli_h: |
| case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; |
| // These intrinsics take an unsigned 5 bit immediate. |
| // The first block of intrinsics actually have an unsigned 5 bit field, |
| // not a df/n field. |
| case Mips::BI__builtin_msa_clei_u_b: |
| case Mips::BI__builtin_msa_clei_u_h: |
| case Mips::BI__builtin_msa_clei_u_w: |
| case Mips::BI__builtin_msa_clei_u_d: |
| case Mips::BI__builtin_msa_clti_u_b: |
| case Mips::BI__builtin_msa_clti_u_h: |
| case Mips::BI__builtin_msa_clti_u_w: |
| case Mips::BI__builtin_msa_clti_u_d: |
| case Mips::BI__builtin_msa_maxi_u_b: |
| case Mips::BI__builtin_msa_maxi_u_h: |
| case Mips::BI__builtin_msa_maxi_u_w: |
| case Mips::BI__builtin_msa_maxi_u_d: |
| case Mips::BI__builtin_msa_mini_u_b: |
| case Mips::BI__builtin_msa_mini_u_h: |
| case Mips::BI__builtin_msa_mini_u_w: |
| case Mips::BI__builtin_msa_mini_u_d: |
| case Mips::BI__builtin_msa_addvi_b: |
| case Mips::BI__builtin_msa_addvi_h: |
| case Mips::BI__builtin_msa_addvi_w: |
| case Mips::BI__builtin_msa_addvi_d: |
| case Mips::BI__builtin_msa_bclri_w: |
| case Mips::BI__builtin_msa_bnegi_w: |
| case Mips::BI__builtin_msa_bseti_w: |
| case Mips::BI__builtin_msa_sat_s_w: |
| case Mips::BI__builtin_msa_sat_u_w: |
| case Mips::BI__builtin_msa_slli_w: |
| case Mips::BI__builtin_msa_srai_w: |
| case Mips::BI__builtin_msa_srari_w: |
| case Mips::BI__builtin_msa_srli_w: |
| case Mips::BI__builtin_msa_srlri_w: |
| case Mips::BI__builtin_msa_subvi_b: |
| case Mips::BI__builtin_msa_subvi_h: |
| case Mips::BI__builtin_msa_subvi_w: |
| case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; |
| case Mips::BI__builtin_msa_binsli_w: |
| case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; |
| // These intrinsics take an unsigned 6 bit immediate. |
| case Mips::BI__builtin_msa_bclri_d: |
| case Mips::BI__builtin_msa_bnegi_d: |
| case Mips::BI__builtin_msa_bseti_d: |
| case Mips::BI__builtin_msa_sat_s_d: |
| case Mips::BI__builtin_msa_sat_u_d: |
| case Mips::BI__builtin_msa_slli_d: |
| case Mips::BI__builtin_msa_srai_d: |
| case Mips::BI__builtin_msa_srari_d: |
| case Mips::BI__builtin_msa_srli_d: |
| case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; |
| case Mips::BI__builtin_msa_binsli_d: |
| case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; |
| // These intrinsics take a signed 5 bit immediate. |
| case Mips::BI__builtin_msa_ceqi_b: |
| case Mips::BI__builtin_msa_ceqi_h: |
| case Mips::BI__builtin_msa_ceqi_w: |
| case Mips::BI__builtin_msa_ceqi_d: |
| case Mips::BI__builtin_msa_clti_s_b: |
| case Mips::BI__builtin_msa_clti_s_h: |
| case Mips::BI__builtin_msa_clti_s_w: |
| case Mips::BI__builtin_msa_clti_s_d: |
| case Mips::BI__builtin_msa_clei_s_b: |
| case Mips::BI__builtin_msa_clei_s_h: |
| case Mips::BI__builtin_msa_clei_s_w: |
| case Mips::BI__builtin_msa_clei_s_d: |
| case Mips::BI__builtin_msa_maxi_s_b: |
| case Mips::BI__builtin_msa_maxi_s_h: |
| case Mips::BI__builtin_msa_maxi_s_w: |
| case Mips::BI__builtin_msa_maxi_s_d: |
| case Mips::BI__builtin_msa_mini_s_b: |
| case Mips::BI__builtin_msa_mini_s_h: |
| case Mips::BI__builtin_msa_mini_s_w: |
| case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; |
| // These intrinsics take an unsigned 8 bit immediate. |
| case Mips::BI__builtin_msa_andi_b: |
| case Mips::BI__builtin_msa_nori_b: |
| case Mips::BI__builtin_msa_ori_b: |
| case Mips::BI__builtin_msa_shf_b: |
| case Mips::BI__builtin_msa_shf_h: |
| case Mips::BI__builtin_msa_shf_w: |
| case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; |
| case Mips::BI__builtin_msa_bseli_b: |
| case Mips::BI__builtin_msa_bmnzi_b: |
| case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; |
| // df/n format |
| // These intrinsics take an unsigned 4 bit immediate. |
| case Mips::BI__builtin_msa_copy_s_b: |
| case Mips::BI__builtin_msa_copy_u_b: |
| case Mips::BI__builtin_msa_insve_b: |
| case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; |
| case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; |
| // These intrinsics take an unsigned 3 bit immediate. |
| case Mips::BI__builtin_msa_copy_s_h: |
| case Mips::BI__builtin_msa_copy_u_h: |
| case Mips::BI__builtin_msa_insve_h: |
| case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; |
| case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; |
| // These intrinsics take an unsigned 2 bit immediate. |
| case Mips::BI__builtin_msa_copy_s_w: |
| case Mips::BI__builtin_msa_copy_u_w: |
| case Mips::BI__builtin_msa_insve_w: |
| case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; |
| case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; |
| // These intrinsics take an unsigned 1 bit immediate. |
| case Mips::BI__builtin_msa_copy_s_d: |
| case Mips::BI__builtin_msa_copy_u_d: |
| case Mips::BI__builtin_msa_insve_d: |
| case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; |
| case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; |
| // Memory offsets and immediate loads. |
| // These intrinsics take a signed 10 bit immediate. |
| case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; |
| case Mips::BI__builtin_msa_ldi_h: |
| case Mips::BI__builtin_msa_ldi_w: |
| case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; |
| case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break; |
| case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break; |
| case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break; |
| case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break; |
| case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break; |
| case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break; |
| case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break; |
| case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break; |
| } |
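| // Illustrative: __builtin_msa_ld_b(ptr, 24) is rejected below because the |
| // offset, although within [-512, 511], is not a multiple of 16. |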
| |
| if (!m) |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u) || |
| SemaBuiltinConstantArgMultiple(TheCall, i, m); |
| } |
| |
| bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0; |
| bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || |
| BuiltinID == PPC::BI__builtin_divdeu || |
| BuiltinID == PPC::BI__builtin_bpermd; |
| bool IsTarget64Bit = Context.getTargetInfo() |
| .getTypeWidth(Context |
| .getTargetInfo() |
| .getIntPtrType()) == 64; |
| bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || |
| BuiltinID == PPC::BI__builtin_divweu || |
| BuiltinID == PPC::BI__builtin_divde || |
| BuiltinID == PPC::BI__builtin_divdeu; |
| |
| if (Is64BitBltin && !IsTarget64Bit) |
| return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt) |
| << TheCall->getSourceRange(); |
| |
| if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || |
| (BuiltinID == PPC::BI__builtin_bpermd && |
| !Context.getTargetInfo().hasFeature("bpermd"))) |
| return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7) |
| << TheCall->getSourceRange(); |
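| // For example (illustrative), __builtin_divde is rejected on a 32-bit |
| // target by the first check, and even on a 64-bit target it additionally |
| // requires the "extdiv" feature. |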
| |
| switch (BuiltinID) { |
| default: return false; |
| case PPC::BI__builtin_altivec_crypto_vshasigmaw: |
| case PPC::BI__builtin_altivec_crypto_vshasigmad: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); |
| case PPC::BI__builtin_tbegin: |
| case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; |
| case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; |
| case PPC::BI__builtin_tabortwc: |
| case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; |
| case PPC::BI__builtin_tabortwci: |
| case PPC::BI__builtin_tabortdci: |
| return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); |
| case PPC::BI__builtin_vsx_xxpermdi: |
| case PPC::BI__builtin_vsx_xxsldwi: |
| return SemaBuiltinVSX(TheCall); |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| if (BuiltinID == SystemZ::BI__builtin_tabort) { |
| Expr *Arg = TheCall->getArg(0); |
| llvm::APSInt AbortCode(32); |
| if (Arg->isIntegerConstantExpr(AbortCode, Context) && |
| AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) |
| return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code) |
| << Arg->getSourceRange(); |
| } |
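| // Illustrative: __builtin_tabort(42) is rejected above (codes 0-255 are |
| // diagnosed as invalid), while __builtin_tabort(256) is accepted. |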
| |
| // For intrinsics that take an immediate value as part of the instruction, |
| // range check it here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_verimb: |
| case SystemZ::BI__builtin_s390_verimh: |
| case SystemZ::BI__builtin_s390_verimf: |
| case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; |
| case SystemZ::BI__builtin_s390_vfaeb: |
| case SystemZ::BI__builtin_s390_vfaeh: |
| case SystemZ::BI__builtin_s390_vfaef: |
| case SystemZ::BI__builtin_s390_vfaebs: |
| case SystemZ::BI__builtin_s390_vfaehs: |
| case SystemZ::BI__builtin_s390_vfaefs: |
| case SystemZ::BI__builtin_s390_vfaezb: |
| case SystemZ::BI__builtin_s390_vfaezh: |
| case SystemZ::BI__builtin_s390_vfaezf: |
| case SystemZ::BI__builtin_s390_vfaezbs: |
| case SystemZ::BI__builtin_s390_vfaezhs: |
| case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vfisb: |
| case SystemZ::BI__builtin_s390_vfidb: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); |
| case SystemZ::BI__builtin_s390_vftcisb: |
| case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; |
| case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vstrcb: |
| case SystemZ::BI__builtin_s390_vstrch: |
| case SystemZ::BI__builtin_s390_vstrcf: |
| case SystemZ::BI__builtin_s390_vstrczb: |
| case SystemZ::BI__builtin_s390_vstrczh: |
| case SystemZ::BI__builtin_s390_vstrczf: |
| case SystemZ::BI__builtin_s390_vstrcbs: |
| case SystemZ::BI__builtin_s390_vstrchs: |
| case SystemZ::BI__builtin_s390_vstrcfs: |
| case SystemZ::BI__builtin_s390_vstrczbs: |
| case SystemZ::BI__builtin_s390_vstrczhs: |
| case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vfminsb: |
| case SystemZ::BI__builtin_s390_vfmaxsb: |
| case SystemZ::BI__builtin_s390_vfmindb: |
| case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). |
| /// This checks that the target supports __builtin_cpu_supports and |
| /// that the string argument is constant and valid. |
| static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { |
| Expr *Arg = TheCall->getArg(0); |
| |
| // Check if the argument is a string literal. |
| if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) |
| return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal) |
| << Arg->getSourceRange(); |
| |
| // Check the contents of the string. |
| StringRef Feature = |
| cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); |
| if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) |
| return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports) |
| << Arg->getSourceRange(); |
| return false; |
| } |
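| // Illustrative: __builtin_cpu_supports("avx2") is accepted when the target |
| // recognizes the feature name; an unrecognized string is diagnosed above. |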
| |
| /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). |
| /// This checks that the target supports __builtin_cpu_is and |
| /// that the string argument is constant and valid. |
| static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) { |
| Expr *Arg = TheCall->getArg(0); |
| |
| // Check if the argument is a string literal. |
| if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) |
| return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal) |
| << Arg->getSourceRange(); |
| |
| // Check the contents of the string. |
| StringRef Feature = |
| cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); |
| if (!S.Context.getTargetInfo().validateCpuIs(Feature)) |
| return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_is) |
| << Arg->getSourceRange(); |
| return false; |
| } |
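| // Illustrative: __builtin_cpu_is("amd") is accepted when the target |
| // recognizes the CPU name; an unrecognized string is diagnosed above. |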
| |
| // Check if the rounding mode is legal. |
| bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { |
| // Indicates if this instruction has rounding control or just SAE. |
| bool HasRC = false; |
| |
| unsigned ArgNum = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| case X86::BI__builtin_ia32_vcvttsd2si32: |
| case X86::BI__builtin_ia32_vcvttsd2si64: |
| case X86::BI__builtin_ia32_vcvttsd2usi32: |
| case X86::BI__builtin_ia32_vcvttsd2usi64: |
| case X86::BI__builtin_ia32_vcvttss2si32: |
| case X86::BI__builtin_ia32_vcvttss2si64: |
| case X86::BI__builtin_ia32_vcvttss2usi32: |
| case X86::BI__builtin_ia32_vcvttss2usi64: |
| ArgNum = 1; |
| break; |
| case X86::BI__builtin_ia32_cvtps2pd512_mask: |
| case X86::BI__builtin_ia32_cvttpd2dq512_mask: |
| case X86::BI__builtin_ia32_cvttpd2qq512_mask: |
| case X86::BI__builtin_ia32_cvttpd2udq512_mask: |
| case X86::BI__builtin_ia32_cvttpd2uqq512_mask: |
| case X86::BI__builtin_ia32_cvttps2dq512_mask: |
| case X86::BI__builtin_ia32_cvttps2qq512_mask: |
| case X86::BI__builtin_ia32_cvttps2udq512_mask: |
| case X86::BI__builtin_ia32_cvttps2uqq512_mask: |
| case X86::BI__builtin_ia32_exp2pd_mask: |
| case X86::BI__builtin_ia32_exp2ps_mask: |
| case X86::BI__builtin_ia32_getexppd512_mask: |
| case X86::BI__builtin_ia32_getexpps512_mask: |
| case X86::BI__builtin_ia32_rcp28pd_mask: |
| case X86::BI__builtin_ia32_rcp28ps_mask: |
| case X86::BI__builtin_ia32_rsqrt28pd_mask: |
| case X86::BI__builtin_ia32_rsqrt28ps_mask: |
| case X86::BI__builtin_ia32_vcomisd: |
| case X86::BI__builtin_ia32_vcomiss: |
| case X86::BI__builtin_ia32_vcvtph2ps512_mask: |
| ArgNum = 3; |
| break; |
| case X86::BI__builtin_ia32_cmppd512_mask: |
| case X86::BI__builtin_ia32_cmpps512_mask: |
| case X86::BI__builtin_ia32_cmpsd_mask: |
| case X86::BI__builtin_ia32_cmpss_mask: |
| case X86::BI__builtin_ia32_cvtss2sd_round_mask: |
| case X86::BI__builtin_ia32_getexpsd128_round_mask: |
| case X86::BI__builtin_ia32_getexpss128_round_mask: |
| case X86::BI__builtin_ia32_maxpd512_mask: |
| case X86::BI__builtin_ia32_maxps512_mask: |
| case X86::BI__builtin_ia32_maxsd_round_mask: |
| case X86::BI__builtin_ia32_maxss_round_mask: |
| case X86::BI__builtin_ia32_minpd512_mask: |
| case X86::BI__builtin_ia32_minps512_mask: |
| case X86::BI__builtin_ia32_minsd_round_mask: |
| case X86::BI__builtin_ia32_minss_round_mask: |
| case X86::BI__builtin_ia32_rcp28sd_round_mask: |
| case X86::BI__builtin_ia32_rcp28ss_round_mask: |
| case X86::BI__builtin_ia32_reducepd512_mask: |
| case X86::BI__builtin_ia32_reduceps512_mask: |
| case X86::BI__builtin_ia32_rndscalepd_mask: |
| case X86::BI__builtin_ia32_rndscaleps_mask: |
| case X86::BI__builtin_ia32_rsqrt28sd_round_mask: |
| case X86::BI__builtin_ia32_rsqrt28ss_round_mask: |
| ArgNum = 4; |
| break; |
| case X86::BI__builtin_ia32_fixupimmpd512_mask: |
| case X86::BI__builtin_ia32_fixupimmpd512_maskz: |
| case X86::BI__builtin_ia32_fixupimmps512_mask: |
| case X86::BI__builtin_ia32_fixupimmps512_maskz: |
| case X86::BI__builtin_ia32_fixupimmsd_mask: |
| case X86::BI__builtin_ia32_fixupimmsd_maskz: |
| case X86::BI__builtin_ia32_fixupimmss_mask: |
| case X86::BI__builtin_ia32_fixupimmss_maskz: |
| case X86::BI__builtin_ia32_rangepd512_mask: |
| case X86::BI__builtin_ia32_rangeps512_mask: |
| case X86::BI__builtin_ia32_rangesd128_round_mask: |
| case X86::BI__builtin_ia32_rangess128_round_mask: |
| case X86::BI__builtin_ia32_reducesd_mask: |
| case X86::BI__builtin_ia32_reducess_mask: |
| case X86::BI__builtin_ia32_rndscalesd_round_mask: |
| case X86::BI__builtin_ia32_rndscaless_round_mask: |
| ArgNum = 5; |
| break; |
| case X86::BI__builtin_ia32_vcvtsd2si64: |
| case X86::BI__builtin_ia32_vcvtsd2si32: |
| case X86::BI__builtin_ia32_vcvtsd2usi32: |
| case X86::BI__builtin_ia32_vcvtsd2usi64: |
| case X86::BI__builtin_ia32_vcvtss2si32: |
| case X86::BI__builtin_ia32_vcvtss2si64: |
| case X86::BI__builtin_ia32_vcvtss2usi32: |
| case X86::BI__builtin_ia32_vcvtss2usi64: |
| ArgNum = 1; |
| HasRC = true; |
| break; |
| case X86::BI__builtin_ia32_cvtsi2sd64: |
| case X86::BI__builtin_ia32_cvtsi2ss32: |
| case X86::BI__builtin_ia32_cvtsi2ss64: |
| case X86::BI__builtin_ia32_cvtusi2sd64: |
| case X86::BI__builtin_ia32_cvtusi2ss32: |
| case X86::BI__builtin_ia32_cvtusi2ss64: |
| ArgNum = 2; |
| HasRC = true; |
| break; |
| case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
| case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
| case X86::BI__builtin_ia32_cvtpd2ps512_mask: |
| case X86::BI__builtin_ia32_cvtpd2qq512_mask: |
| case X86::BI__builtin_ia32_cvtpd2uqq512_mask: |
| case X86::BI__builtin_ia32_cvtps2qq512_mask: |
| case X86::BI__builtin_ia32_cvtps2uqq512_mask: |
| case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
| case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
| case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
| case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
| case X86::BI__builtin_ia32_sqrtpd512_mask: |
| case X86::BI__builtin_ia32_sqrtps512_mask: |
| ArgNum = 3; |
| HasRC = true; |
| break; |
| case X86::BI__builtin_ia32_addpd512_mask: |
| case X86::BI__builtin_ia32_addps512_mask: |
| case X86::BI__builtin_ia32_divpd512_mask: |
| case X86::BI__builtin_ia32_divps512_mask: |
| case X86::BI__builtin_ia32_mulpd512_mask: |
| case X86::BI__builtin_ia32_mulps512_mask: |
| case X86::BI__builtin_ia32_subpd512_mask: |
| case X86::BI__builtin_ia32_subps512_mask: |
| case X86::BI__builtin_ia32_addss_round_mask: |
| case X86::BI__builtin_ia32_addsd_round_mask: |
| case X86::BI__builtin_ia32_divss_round_mask: |
| case X86::BI__builtin_ia32_divsd_round_mask: |
| case X86::BI__builtin_ia32_mulss_round_mask: |
| case X86::BI__builtin_ia32_mulsd_round_mask: |
| case X86::BI__builtin_ia32_subss_round_mask: |
| case X86::BI__builtin_ia32_subsd_round_mask: |
| case X86::BI__builtin_ia32_scalefpd512_mask: |
| case X86::BI__builtin_ia32_scalefps512_mask: |
| case X86::BI__builtin_ia32_scalefsd_round_mask: |
| case X86::BI__builtin_ia32_scalefss_round_mask: |
| case X86::BI__builtin_ia32_getmantpd512_mask: |
| case X86::BI__builtin_ia32_getmantps512_mask: |
| case X86::BI__builtin_ia32_cvtsd2ss_round_mask: |
| case X86::BI__builtin_ia32_sqrtsd_round_mask: |
| case X86::BI__builtin_ia32_sqrtss_round_mask: |
| case X86::BI__builtin_ia32_vfmaddpd512_mask: |
| case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
| case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
| case X86::BI__builtin_ia32_vfmaddps512_mask: |
| case X86::BI__builtin_ia32_vfmaddps512_mask3: |
| case X86::BI__builtin_ia32_vfmaddps512_maskz: |
| case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
| case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
| case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
| case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
| case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
| case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
| case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
| case X86::BI__builtin_ia32_vfmsubps512_mask3: |
| case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
| case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
| case X86::BI__builtin_ia32_vfnmaddpd512_mask: |
| case X86::BI__builtin_ia32_vfnmaddps512_mask: |
| case X86::BI__builtin_ia32_vfnmsubpd512_mask: |
| case X86::BI__builtin_ia32_vfnmsubpd512_mask3: |
| case X86::BI__builtin_ia32_vfnmsubps512_mask: |
| case X86::BI__builtin_ia32_vfnmsubps512_mask3: |
| case X86::BI__builtin_ia32_vfmaddsd3_mask: |
| case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
| case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
| case X86::BI__builtin_ia32_vfmaddss3_mask: |
| case X86::BI__builtin_ia32_vfmaddss3_maskz: |
| case X86::BI__builtin_ia32_vfmaddss3_mask3: |
| ArgNum = 4; |
| HasRC = true; |
| break; |
| case X86::BI__builtin_ia32_getmantsd_round_mask: |
| case X86::BI__builtin_ia32_getmantss_round_mask: |
| ArgNum = 5; |
| HasRC = true; |
| break; |
| } |
| |
| llvm::APSInt Result; |
| |
| // We can't check the value of a dependent argument. |
| Expr *Arg = TheCall->getArg(ArgNum); |
| if (Arg->isTypeDependent() || Arg->isValueDependent()) |
| return false; |
| |
| // Check constant-ness first. |
| if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) |
| return true; |
| |
| // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the |
| // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0), |
| // make sure it's only combined with ROUND_NO_EXC. |
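| // For example (illustrative), with rounding control an immediate of 9 |
| // (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) is accepted, while 1 |
| // (_MM_FROUND_TO_NEG_INF alone) is rejected. |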
| if (Result == 4/*ROUND_CUR_DIRECTION*/ || |
| Result == 8/*ROUND_NO_EXC*/ || |
| (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) |
| return false; |
| |
| return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_rounding) |
| << Arg->getSourceRange(); |
| } |
| |
| // Check if the gather/scatter scale is legal. |
| bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| unsigned ArgNum = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| case X86::BI__builtin_ia32_gatherpfdpd: |
| case X86::BI__builtin_ia32_gatherpfdps: |
| case X86::BI__builtin_ia32_gatherpfqpd: |
| case X86::BI__builtin_ia32_gatherpfqps: |
| case X86::BI__builtin_ia32_scatterpfdpd: |
| case X86::BI__builtin_ia32_scatterpfdps: |
| case X86::BI__builtin_ia32_scatterpfqpd: |
| case X86::BI__builtin_ia32_scatterpfqps: |
| ArgNum = 3; |
| break; |
| case X86::BI__builtin_ia32_gatherd_pd: |
| case X86::BI__builtin_ia32_gatherd_pd256: |
| case X86::BI__builtin_ia32_gatherq_pd: |
| case X86::BI__builtin_ia32_gatherq_pd256: |
| case X86::BI__builtin_ia32_gatherd_ps: |
| case X86::BI__builtin_ia32_gatherd_ps256: |
| case X86::BI__builtin_ia32_gatherq_ps: |
| case X86::BI__builtin_ia32_gatherq_ps256: |
| case X86::BI__builtin_ia32_gatherd_q: |
| case X86::BI__builtin_ia32_gatherd_q256: |
| case X86::BI__builtin_ia32_gatherq_q: |
| case X86::BI__builtin_ia32_gatherq_q256: |
| case X86::BI__builtin_ia32_gatherd_d: |
| case X86::BI__builtin_ia32_gatherd_d256: |
| case X86::BI__builtin_ia32_gatherq_d: |
| case X86::BI__builtin_ia32_gatherq_d256: |
| case X86::BI__builtin_ia32_gather3div2df: |
| case X86::BI__builtin_ia32_gather3div2di: |
| case X86::BI__builtin_ia32_gather3div4df: |
| case X86::BI__builtin_ia32_gather3div4di: |
| case X86::BI__builtin_ia32_gather3div4sf: |
| case X86::BI__builtin_ia32_gather3div4si: |
| case X86::BI__builtin_ia32_gather3div8sf: |
| case X86::BI__builtin_ia32_gather3div8si: |
| case X86::BI__builtin_ia32_gather3siv2df: |
| case X86::BI__builtin_ia32_gather3siv2di: |
| case X86::BI__builtin_ia32_gather3siv4df: |
| case X86::BI__builtin_ia32_gather3siv4di: |
| case X86::BI__builtin_ia32_gather3siv4sf: |
| case X86::BI__builtin_ia32_gather3siv4si: |
| case X86::BI__builtin_ia32_gather3siv8sf: |
| case X86::BI__builtin_ia32_gather3siv8si: |
| case X86::BI__builtin_ia32_gathersiv8df: |
| case X86::BI__builtin_ia32_gathersiv16sf: |
| case X86::BI__builtin_ia32_gatherdiv8df: |
| case X86::BI__builtin_ia32_gatherdiv16sf: |
| case X86::BI__builtin_ia32_gathersiv8di: |
| case X86::BI__builtin_ia32_gathersiv16si: |
| case X86::BI__builtin_ia32_gatherdiv8di: |
| case X86::BI__builtin_ia32_gatherdiv16si: |
| case X86::BI__builtin_ia32_scatterdiv2df: |
| case X86::BI__builtin_ia32_scatterdiv2di: |
| case X86::BI__builtin_ia32_scatterdiv4df: |
| case X86::BI__builtin_ia32_scatterdiv4di: |
| case X86::BI__builtin_ia32_scatterdiv4sf: |
| case X86::BI__builtin_ia32_scatterdiv4si: |
| case X86::BI__builtin_ia32_scatterdiv8sf: |
| case X86::BI__builtin_ia32_scatterdiv8si: |
| case X86::BI__builtin_ia32_scattersiv2df: |
| case X86::BI__builtin_ia32_scattersiv2di: |
| case X86::BI__builtin_ia32_scattersiv4df: |
| case X86::BI__builtin_ia32_scattersiv4di: |
| case X86::BI__builtin_ia32_scattersiv4sf: |
| case X86::BI__builtin_ia32_scattersiv4si: |
| case X86::BI__builtin_ia32_scattersiv8sf: |
| case X86::BI__builtin_ia32_scattersiv8si: |
| case X86::BI__builtin_ia32_scattersiv8df: |
| case X86::BI__builtin_ia32_scattersiv16sf: |
| case X86::BI__builtin_ia32_scatterdiv8df: |
| case X86::BI__builtin_ia32_scatterdiv16sf: |
| case X86::BI__builtin_ia32_scattersiv8di: |
| case X86::BI__builtin_ia32_scattersiv16si: |
| case X86::BI__builtin_ia32_scatterdiv8di: |
| case X86::BI__builtin_ia32_scatterdiv16si: |
| ArgNum = 4; |
| break; |
| } |
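| // The scale immediate multiplies the vector index; only 1, 2, 4 and 8 are |
| // encodable, so e.g. a scale of 3 is rejected below (illustrative). |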
| |
| llvm::APSInt Result; |
| |
| // We can't check the value of a dependent argument. |
| Expr *Arg = TheCall->getArg(ArgNum); |
| if (Arg->isTypeDependent() || Arg->isValueDependent()) |
| return false; |
| |
| // Check constant-ness first. |
| if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) |
| return true; |
| |
| if (Result == 1 || Result == 2 || Result == 4 || Result == 8) |
| return false; |
| |
| return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_scale) |
| << Arg->getSourceRange(); |
| } |
| |
| bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| if (BuiltinID == X86::BI__builtin_cpu_supports) |
| return SemaBuiltinCpuSupports(*this, TheCall); |
| |
| if (BuiltinID == X86::BI__builtin_cpu_is) |
| return SemaBuiltinCpuIs(*this, TheCall); |
| |
| // If the intrinsic has rounding or SAE, make sure it's valid. |
| if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) |
| return true; |
| |
| // If the intrinsic has a gather/scatter scale immediate, make sure it's valid. |
| if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) |
| return true; |
| |
| // For intrinsics that take an immediate value as part of the instruction, |
| // range check it here. |
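| // E.g. _mm_prefetch(p, 8) and __builtin_ia32_xabort(256) are rejected by |
| // the ranges selected below (illustrative). |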
| int i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| case X86::BI_mm_prefetch: |
| i = 1; l = 0; u = 7; |
| break; |
| case X86::BI__builtin_ia32_sha1rnds4: |
| case X86::BI__builtin_ia32_shuf_f32x4_256_mask: |
| case X86::BI__builtin_ia32_shuf_f64x2_256_mask: |
| case X86::BI__builtin_ia32_shuf_i32x4_256_mask: |
| case X86::BI__builtin_ia32_shuf_i64x2_256_mask: |
| i = 2; l = 0; u = 3; |
| break; |
| case X86::BI__builtin_ia32_vpermil2pd: |
| case X86::BI__builtin_ia32_vpermil2pd256: |
| case X86::BI__builtin_ia32_vpermil2ps: |
| case X86::BI__builtin_ia32_vpermil2ps256: |
| i = 3; l = 0; u = 3; |
| break; |
| case X86::BI__builtin_ia32_cmpb128_mask: |
| case X86::BI__builtin_ia32_cmpw128_mask: |
| case X86::BI__builtin_ia32_cmpd128_mask: |
| case X86::BI__builtin_ia32_cmpq128_mask: |
| case X86::BI__builtin_ia32_cmpb256_mask: |
| case X86::BI__builtin_ia32_cmpw256_mask: |
| case X86::BI__builtin_ia32_cmpd256_mask: |
| case X86::BI__builtin_ia32_cmpq256_mask: |
| case X86::BI__builtin_ia32_cmpb512_mask: |
| case X86::BI__builtin_ia32_cmpw512_mask: |
| case X86::BI__builtin_ia32_cmpd512_mask: |
| case X86::BI__builtin_ia32_cmpq512_mask: |
| case X86::BI__builtin_ia32_ucmpb128_mask: |
| case X86::BI__builtin_ia32_ucmpw128_mask: |
| case X86::BI__builtin_ia32_ucmpd128_mask: |
| case X86::BI__builtin_ia32_ucmpq128_mask: |
| case X86::BI__builtin_ia32_ucmpb256_mask: |
| case X86::BI__builtin_ia32_ucmpw256_mask: |
| case X86::BI__builtin_ia32_ucmpd256_mask: |
| case X86::BI__builtin_ia32_ucmpq256_mask: |
| case X86::BI__builtin_ia32_ucmpb512_mask: |
| case X86::BI__builtin_ia32_ucmpw512_mask: |
| case X86::BI__builtin_ia32_ucmpd512_mask: |
| case X86::BI__builtin_ia32_ucmpq512_mask: |
| case X86::BI__builtin_ia32_vpcomub: |
| case X86::BI__builtin_ia32_vpcomuw: |
| case X86::BI__builtin_ia32_vpcomud: |
| case X86::BI__builtin_ia32_vpcomuq: |
| case X86::BI__builtin_ia32_vpcomb: |
| case X86::BI__builtin_ia32_vpcomw: |
| case X86::BI__builtin_ia32_vpcomd: |
| case X86::BI__builtin_ia32_vpcomq: |
| i = 2; l = 0; u = 7; |
| break; |
| case X86::BI__builtin_ia32_roundps: |
| case X86::BI__builtin_ia32_roundpd: |
| case X86::BI__builtin_ia32_roundps256: |
| case X86::BI__builtin_ia32_roundpd256: |
| i = 1; l = 0; u = 15; |
| break; |
| case X86::BI__builtin_ia32_roundss: |
| case X86::BI__builtin_ia32_roundsd: |
| case X86::BI__builtin_ia32_rangepd128_mask: |
| case X86::BI__builtin_ia32_rangepd256_mask: |
| case X86::BI__builtin_ia32_rangepd512_mask: |
| case X86::BI__builtin_ia32_rangeps128_mask: |
| case X86::BI__builtin_ia32_rangeps256_mask: |
| case X86::BI__builtin_ia32_rangeps512_mask: |
| case X86::BI__builtin_ia32_getmantsd_round_mask: |
| case X86::BI__builtin_ia32_getmantss_round_mask: |
| i = 2; l = 0; u = 15; |
| break; |
| case X86::BI__builtin_ia32_cmpps: |
| case X86::BI__builtin_ia32_cmpss: |
| case X86::BI__builtin_ia32_cmppd: |
| case X86::BI__builtin_ia32_cmpsd: |
| case X86::BI__builtin_ia32_cmpps256: |
| case X86::BI__builtin_ia32_cmppd256: |
| case X86::BI__builtin_ia32_cmpps128_mask: |
| case X86::BI__builtin_ia32_cmppd128_mask: |
| case X86::BI__builtin_ia32_cmpps256_mask: |
| case X86::BI__builtin_ia32_cmppd256_mask: |
| case X86::BI__builtin_ia32_cmpps512_mask: |
| case X86::BI__builtin_ia32_cmppd512_mask: |
| case X86::BI__builtin_ia32_cmpsd_mask: |
| case X86::BI__builtin_ia32_cmpss_mask: |
| i = 2; l = 0; u = 31; |
| break; |
| case X86::BI__builtin_ia32_xabort: |
| i = 0; l = -128; u = 255; |
| break; |
| case X86::BI__builtin_ia32_pshufw: |
| case X86::BI__builtin_ia32_aeskeygenassist128: |
| i = 1; l = -128; u = 255; |
| break; |
| case X86::BI__builtin_ia32_vcvtps2ph: |
| case X86::BI__builtin_ia32_vcvtps2ph_mask: |
| case X86::BI__builtin_ia32_vcvtps2ph256: |
| case X86::BI__builtin_ia32_vcvtps2ph256_mask: |
| case X86::BI__builtin_ia32_vcvtps2ph512_mask: |
| case X86::BI__builtin_ia32_rndscaleps_128_mask: |
| case X86::BI__builtin_ia32_rndscalepd_128_mask: |
| case X86::BI__builtin_ia32_rndscaleps_256_mask: |
| case X86::BI__builtin_ia32_rndscalepd_256_mask: |
| case X86::BI__builtin_ia32_rndscaleps_mask: |
| case X86::BI__builtin_ia32_rndscalepd_mask: |
| case X86::BI__builtin_ia32_reducepd128_mask: |
| case X86::BI__builtin_ia32_reducepd256_mask: |
| case X86::BI__builtin_ia32_reducepd512_mask: |
| case X86::BI__builtin_ia32_reduceps128_mask: |
| case X86::BI__builtin_ia32_reduceps256_mask: |
| case X86::BI__builtin_ia32_reduceps512_mask: |
| case X86::BI__builtin_ia32_prold512_mask: |
| case X86::BI__builtin_ia32_prolq512_mask: |
| case X86::BI__builtin_ia32_prold128_mask: |
| case X86::BI__builtin_ia32_prold256_mask: |
| case X86::BI__builtin_ia32_prolq128_mask: |
| case X86::BI__builtin_ia32_prolq256_mask: |
| case X86::BI__builtin_ia32_prord128_mask: |
| case X86::BI__builtin_ia32_prord256_mask: |
| case X86::BI__builtin_ia32_prorq128_mask: |
| case X86::BI__builtin_ia32_prorq256_mask: |
| case X86::BI__builtin_ia32_fpclasspd128_mask: |
| case X86::BI__builtin_ia32_fpclasspd256_mask: |
| case X86::BI__builtin_ia32_fpclassps128_mask: |
| case X86::BI__builtin_ia32_fpclassps256_mask: |
| case X86::BI__builtin_ia32_fpclassps512_mask: |
| case X86::BI__builtin_ia32_fpclasspd512_mask: |
| case X86::BI__builtin_ia32_fpclasssd_mask: |
| case X86::BI__builtin_ia32_fpclassss_mask: |
| i = 1; l = 0; u = 255; |
| break; |
| case X86::BI__builtin_ia32_palignr: |
| case X86::BI__builtin_ia32_insertps128: |
| case X86::BI__builtin_ia32_dpps: |
| case X86::BI__builtin_ia32_dppd: |
| case X86::BI__builtin_ia32_dpps256: |
| case X86::BI__builtin_ia32_mpsadbw128: |
| case X86::BI__builtin_ia32_mpsadbw256: |
| case X86::BI__builtin_ia32_pcmpistrm128: |
| case X86::BI__builtin_ia32_pcmpistri128: |
| case X86::BI__builtin_ia32_pcmpistria128: |
| case X86::BI__builtin_ia32_pcmpistric128: |
| case X86::BI__builtin_ia32_pcmpistrio128: |
| case X86::BI__builtin_ia32_pcmpistris128: |
| case X86::BI__builtin_ia32_pcmpistriz128: |
| case X86::BI__builtin_ia32_pclmulqdq128: |
| case X86::BI__builtin_ia32_vperm2f128_pd256: |
| case X86::BI__builtin_ia32_vperm2f128_ps256: |
| case X86::BI__builtin_ia32_vperm2f128_si256: |
| case X86::BI__builtin_ia32_permti256: |
| i = 2; l = -128; u = 255; |
| break; |
| case X86::BI__builtin_ia32_palignr128: |
| case X86::BI__builtin_ia32_palignr256: |
| case X86::BI__builtin_ia32_palignr512_mask: |
| case X86::BI__builtin_ia32_vcomisd: |
| case X86::BI__builtin_ia32_vcomiss: |
| case X86::BI__builtin_ia32_shuf_f32x4_mask: |
| case X86::BI__builtin_ia32_shuf_f64x2_mask: |
| case X86::BI__builtin_ia32_shuf_i32x4_mask: |
| case X86::BI__builtin_ia32_shuf_i64x2_mask: |
| case X86::BI__builtin_ia32_dbpsadbw128_mask: |
| case X86::BI__builtin_ia32_dbpsadbw256_mask: |
| case X86::BI__builtin_ia32_dbpsadbw512_mask: |
| i = 2; l = 0; u = 255; |
| break; |
| case X86::BI__builtin_ia32_fixupimmpd512_mask: |
| case X86::BI__builtin_ia32_fixupimmpd512_maskz: |
| case X86::BI__builtin_ia32_fixupimmps512_mask: |
| case X86::BI__builtin_ia32_fixupimmps512_maskz: |
| case X86::BI__builtin_ia32_fixupimmsd_mask: |
| case X86::BI__builtin_ia32_fixupimmsd_maskz: |
| case X86::BI__builtin_ia32_fixupimmss_mask: |
| case X86::BI__builtin_ia32_fixupimmss_maskz: |
| case X86::BI__builtin_ia32_fixupimmpd128_mask: |
| case X86::BI__builtin_ia32_fixupimmpd128_maskz: |
| case X86::BI__builtin_ia32_fixupimmpd256_mask: |
| case X86::BI__builtin_ia32_fixupimmpd256_maskz: |
| case X86::BI__builtin_ia32_fixupimmps128_mask: |
| case X86::BI__builtin_ia32_fixupimmps128_maskz: |
| case X86::BI__builtin_ia32_fixupimmps256_mask: |
| case X86::BI__builtin_ia32_fixupimmps256_maskz: |
| case X86::BI__builtin_ia32_pternlogd512_mask: |
| case X86::BI__builtin_ia32_pternlogd512_maskz: |
| case X86::BI__builtin_ia32_pternlogq512_mask: |
| case X86::BI__builtin_ia32_pternlogq512_maskz: |
| case X86::BI__builtin_ia32_pternlogd128_mask: |
| case X86::BI__builtin_ia32_pternlogd128_maskz: |
| case X86::BI__builtin_ia32_pternlogd256_mask: |
| case X86::BI__builtin_ia32_pternlogd256_maskz: |
| case X86::BI__builtin_ia32_pternlogq128_mask: |
| case X86::BI__builtin_ia32_pternlogq128_maskz: |
| case X86::BI__builtin_ia32_pternlogq256_mask: |
| case X86::BI__builtin_ia32_pternlogq256_maskz: |
| i = 3; l = 0; u = 255; |
| break; |
| case X86::BI__builtin_ia32_gatherpfdpd: |
| case X86::BI__builtin_ia32_gatherpfdps: |
| case X86::BI__builtin_ia32_gatherpfqpd: |
| case X86::BI__builtin_ia32_gatherpfqps: |
| case X86::BI__builtin_ia32_scatterpfdpd: |
| case X86::BI__builtin_ia32_scatterpfdps: |
| case X86::BI__builtin_ia32_scatterpfqpd: |
| case X86::BI__builtin_ia32_scatterpfqps: |
| i = 4; l = 2; u = 3; |
| break; |
| case X86::BI__builtin_ia32_pcmpestrm128: |
| case X86::BI__builtin_ia32_pcmpestri128: |
| case X86::BI__builtin_ia32_pcmpestria128: |
| case X86::BI__builtin_ia32_pcmpestric128: |
| case X86::BI__builtin_ia32_pcmpestrio128: |
| case X86::BI__builtin_ia32_pcmpestris128: |
| case X86::BI__builtin_ia32_pcmpestriz128: |
| i = 4; l = -128; u = 255; |
| break; |
| case X86::BI__builtin_ia32_rndscalesd_round_mask: |
| case X86::BI__builtin_ia32_rndscaless_round_mask: |
| i = 4; l = 0; u = 255; |
| break; |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo |
| /// parameter with the FormatAttr's correct format_idx and firstDataArg. |
| /// Returns true when the format fits the function and the FormatStringInfo has |
| /// been populated. |
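| /// For example (illustrative), given a member function declared as |
| ///   __attribute__((format(printf, 2, 3))) void log(const char *fmt, ...); |
| /// GCC counts the implicit 'this' as argument 1, so format_idx 2 names |
| /// 'fmt'; the adjustment below converts it to the 0-based index used by our |
| /// own argument lists. |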
| bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, |
| FormatStringInfo *FSI) { |
| FSI->HasVAListArg = Format->getFirstArg() == 0; |
| FSI->FormatIdx = Format->getFormatIdx() - 1; |
| FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; |
| |
| // The way the format attribute works in GCC, the implicit this argument |
| // of member functions is counted. However, it doesn't appear in our own |
| // lists, so decrement format_idx in that case. |
| if (IsCXXMember) { |
| if(FSI->FormatIdx == 0) |
| return false; |
| --FSI->FormatIdx; |
| if (FSI->FirstDataArg != 0) |
| --FSI->FirstDataArg; |
| } |
| return true; |
| } |
| |
| /// Checks if the given expression evaluates to null. |
| /// |
| /// \brief Returns true if the value evaluates to null. |
| static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { |
| // If the expression has non-null type, it doesn't evaluate to null. |
| if (auto nullability |
| = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { |
| if (*nullability == NullabilityKind::NonNull) |
| return false; |
| } |
| |
| // As a special case, transparent unions initialized with zero are |
| // considered null for the purposes of the nonnull attribute. |
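| // Illustrative: a zero-initialized compound literal of a transparent union |
| // type, e.g. (union U){0}, is therefore treated as a null pointer argument. |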
| if (const RecordType *UT = Expr->getType()->getAsUnionType()) { |
| if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) |
| if (const CompoundLiteralExpr *CLE = |
| dyn_cast<CompoundLiteralExpr>(Expr)) |
| if (const InitListExpr *ILE = |
| dyn_cast<InitListExpr>(CLE->getInitializer())) |
| Expr = ILE->getInit(0); |
| } |
| |
| bool Result; |
| return (!Expr->isValueDependent() && |
| Expr->EvaluateAsBooleanCondition(Result, S.Context) && |
| !Result); |
| } |
| |
| static void CheckNonNullArgument(Sema &S, |
| const Expr *ArgExpr, |
| SourceLocation CallSiteLoc) { |
| if (CheckNonNullExpr(S, ArgExpr)) |
| S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, |
| S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange()); |
| } |
| |
| bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { |
| FormatStringInfo FSI; |
| if ((GetFormatStringType(Format) == FST_NSString) && |
| getFormatStringInfo(Format, false, &FSI)) { |
| Idx = FSI.FormatIdx; |
| return true; |
| } |
| return false; |
| } |
| |
| /// \brief Diagnose use of the %s directive in an NSString that is being passed |
| /// as a formatting string to a formatting method. |
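| /// For example (illustrative), |
| ///   CFStringCreateWithFormat(NULL, NULL, (CFStringRef)@"%s", str); |
| /// is flagged because the %s directive appears in the CFString format |
| /// argument (index 2 for the CFString formatting family). |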
| static void |
| DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, |
| const NamedDecl *FDecl, |
| Expr **Args, |
| unsigned NumArgs) { |
| unsigned Idx = 0; |
| bool Format = false; |
| ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); |
| if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { |
| Idx = 2; |
| Format = true; |
| } |
| else |
| for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { |
| if (S.GetFormatNSStringIdx(I, Idx)) { |
| Format = true; |
| break; |
| } |
| } |
| if (!Format || NumArgs <= Idx) |
| return; |
| const Expr *FormatExpr = Args[Idx]; |
| if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) |
| FormatExpr = CSCE->getSubExpr(); |
| const StringLiteral *FormatString; |
| if (const ObjCStringLiteral *OSL = |
| dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) |
| FormatString = OSL->getString(); |
| else |
| FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); |
| if (!FormatString) |
| return; |
| if (S.FormatStringHasSArg(FormatString)) { |
| S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) |
| << "%s" << 1 << 1; |
| S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) |
| << FDecl->getDeclName(); |
| } |
| |