| //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This contains code to emit Builtin calls as LLVM code. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "CGCXXABI.h" |
| #include "CGObjCRuntime.h" |
| #include "CGOpenCLRuntime.h" |
| #include "CodeGenFunction.h" |
| #include "CodeGenModule.h" |
| #include "ConstantEmitter.h" |
| #include "TargetInfo.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/Decl.h" |
| #include "clang/Analysis/Analyses/OSLog.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/CodeGen/CGFunctionInfo.h" |
| #include "llvm/ADT/StringExtras.h" |
| #include "llvm/IR/CallSite.h" |
| #include "llvm/IR/DataLayout.h" |
| #include "llvm/IR/InlineAsm.h" |
| #include "llvm/IR/Intrinsics.h" |
| #include "llvm/IR/MDBuilder.h" |
| #include "llvm/Support/ConvertUTF.h" |
| #include "llvm/Support/ScopedPrinter.h" |
| #include "llvm/Support/TargetParser.h" |
| #include <sstream> |
| |
| using namespace clang; |
| using namespace CodeGen; |
| using namespace llvm; |
| |
static int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
| return std::min(High, std::max(Low, Value)); |
| } |
| |
| /// getBuiltinLibFunction - Given a builtin id for a function like |
| /// "__builtin_fabsf", return a Function* for "fabsf". |
| llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
| unsigned BuiltinID) { |
| assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
| |
| // Get the name, skip over the __builtin_ prefix (if necessary). |
| StringRef Name; |
| GlobalDecl D(FD); |
| |
| // If the builtin has been declared explicitly with an assembler label, |
| // use the mangled name. This differs from the plain label on platforms |
| // that prefix labels. |
| if (FD->hasAttr<AsmLabelAttr>()) |
| Name = getMangledName(D); |
| else |
| Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
| |
| llvm::FunctionType *Ty = |
| cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
| |
| return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
| } |
| |
| /// Emit the conversions required to turn the given value into an |
| /// integer of the given size. |
| static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::IntegerType *IntType) { |
| V = CGF.EmitToMemory(V, T); |
| |
| if (V->getType()->isPointerTy()) |
| return CGF.Builder.CreatePtrToInt(V, IntType); |
| |
| assert(V->getType() == IntType); |
| return V; |
| } |
| |
| static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::Type *ResultType) { |
| V = CGF.EmitFromMemory(V, T); |
| |
| if (ResultType->isPointerTy()) |
| return CGF.Builder.CreateIntToPtr(V, ResultType); |
| |
| assert(V->getType() == ResultType); |
| return V; |
| } |
| |
/// Utility to insert an atomic instruction based on Intrinsic::ID
| /// and the expression node. |
| static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
| return EmitFromInt(CGF, Result, T, ValueType); |
| } |
| |
| static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
| Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| // Convert the type of the pointer to a pointer to the stored type. |
| Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
| Value *BC = CGF.Builder.CreateBitCast( |
| Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
| LV.setNontemporal(true); |
| CGF.EmitStoreOfScalar(Val, LV, false); |
| return nullptr; |
| } |
| |
| static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
| LV.setNontemporal(true); |
| return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
| } |
| |
| static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E) { |
| return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
| } |
| |
/// Utility to insert an atomic instruction based on Intrinsic::ID and
| /// the expression node, where the return value is the result of the |
| /// operation. |
| static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E, |
| Instruction::BinaryOps Op, |
| bool Invert = false) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
| Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
| if (Invert) |
| Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
| llvm::ConstantInt::get(IntType, -1)); |
| Result = EmitFromInt(CGF, Result, T, ValueType); |
| return RValue::get(Result); |
| } |
| |
| /// @brief Utility to insert an atomic cmpxchg instruction. |
| /// |
| /// @param CGF The current codegen function. |
| /// @param E Builtin call expression to convert to cmpxchg. |
| /// arg0 - address to operate on |
| /// arg1 - value to compare with |
| /// arg2 - new value |
| /// @param ReturnBool Specifies whether to return success flag of |
| /// cmpxchg result or the old value. |
| /// |
| /// @returns result of cmpxchg, according to ReturnBool |
| static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
| bool ReturnBool) { |
| QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = llvm::IntegerType::get( |
| CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| Value *Args[3]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
| |
| Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
| Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| if (ReturnBool) |
| // Extract boolean success flag and zext it to int. |
| return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
| CGF.ConvertType(E->getType())); |
| else |
| // Extract old value and emit it using the same type as compare value. |
| return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
| ValueType); |
| } |
| |
| // Emit a simple mangled intrinsic that has 1 argument and a return type |
| // matching the argument type. |
| static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, Src0); |
| } |
| |
| // Emit an intrinsic that has 2 operands of the same type as its result. |
| static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
| } |
| |
| // Emit an intrinsic that has 3 operands of the same type as its result. |
| static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
| } |
| |
// Emit an intrinsic that has 1 float or double operand and 1 integer operand.
| static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, {Src0, Src1}); |
| } |
| |
| /// EmitFAbs - Emit a call to @llvm.fabs(). |
| static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
| Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
| llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
| Call->setDoesNotAccessMemory(); |
| return Call; |
| } |
| |
| /// Emit the computation of the sign bit for a floating point value. Returns |
| /// the i1 sign bit value. |
| static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
| LLVMContext &C = CGF.CGM.getLLVMContext(); |
| |
| llvm::Type *Ty = V->getType(); |
| int Width = Ty->getPrimitiveSizeInBits(); |
| llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateBitCast(V, IntTy); |
| if (Ty->isPPC_FP128Ty()) { |
// We want the sign bit of the higher-order double. The bitcast we just
// did works as if the double-double was stored to memory and then
// read as an i128. The "store" will put the higher-order double in the
// lower address in both little- and big-endian modes, but the "load"
// will treat those bits as a different part of the i128: the low bits in
// little-endian, the high bits in big-endian. Therefore, on big-endian
// targets we need to shift the high bits down to the low before truncating.
| Width >>= 1; |
| if (CGF.getTarget().isBigEndian()) { |
| Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
| V = CGF.Builder.CreateLShr(V, ShiftCst); |
| } |
// We are truncating the value in order to extract the higher-order
// double, from which we will extract the sign.
| IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateTrunc(V, IntTy); |
| } |
| Value *Zero = llvm::Constant::getNullValue(IntTy); |
| return CGF.Builder.CreateICmpSLT(V, Zero); |
| } |
| |
| static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
| const CallExpr *E, llvm::Constant *calleeValue) { |
| CGCallee callee = CGCallee::forDirect(calleeValue, FD); |
| return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
| } |
| |
| /// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
| /// depending on IntrinsicID. |
| /// |
| /// \arg CGF The current codegen function. |
| /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
| /// \arg X The first argument to the llvm.*.with.overflow.*. |
| /// \arg Y The second argument to the llvm.*.with.overflow.*. |
| /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
| /// \returns The result (i.e. sum/product) returned by the intrinsic. |
| static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
| const llvm::Intrinsic::ID IntrinsicID, |
| llvm::Value *X, llvm::Value *Y, |
| llvm::Value *&Carry) { |
| // Make sure we have integers of the same width. |
| assert(X->getType() == Y->getType() && |
| "Arguments must be the same type. (Did you forget to make sure both " |
| "arguments have the same integer width?)"); |
| |
| llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
| llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
| Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
| return CGF.Builder.CreateExtractValue(Tmp, 0); |
| } |
| |
| static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
| unsigned IntrinsicID, |
| int low, int high) { |
| llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
| llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
| llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
| Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
| return Call; |
| } |
| |
| namespace { |
| struct WidthAndSignedness { |
| unsigned Width; |
| bool Signed; |
| }; |
| } |
| |
| static WidthAndSignedness |
| getIntegerWidthAndSignedness(const clang::ASTContext &context, |
| const clang::QualType Type) { |
| assert(Type->isIntegerType() && "Given type is not an integer."); |
| unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width; |
| bool Signed = Type->isSignedIntegerType(); |
| return {Width, Signed}; |
| } |
| |
| // Given one or more integer types, this function produces an integer type that |
| // encompasses them: any value in one of the given types could be expressed in |
| // the encompassing type. |
| static struct WidthAndSignedness |
| EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
| assert(Types.size() > 0 && "Empty list of types."); |
| |
| // If any of the given types is signed, we must return a signed type. |
| bool Signed = false; |
| for (const auto &Type : Types) { |
| Signed |= Type.Signed; |
| } |
| |
| // The encompassing type must have a width greater than or equal to the width |
// of the specified types. Additionally, if the encompassing type is signed,
| // its width must be strictly greater than the width of any unsigned types |
| // given. |
| unsigned Width = 0; |
| for (const auto &Type : Types) { |
| unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
| if (Width < MinWidth) { |
| Width = MinWidth; |
| } |
| } |
| |
| return {Width, Signed}; |
| } |
| |
| Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
| llvm::Type *DestType = Int8PtrTy; |
| if (ArgValue->getType() != DestType) |
| ArgValue = |
| Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
| |
| Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
| return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
| } |
| |
| /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
/// __builtin_object_size(p, @p To) is correct.
| static bool areBOSTypesCompatible(int From, int To) { |
| // Note: Our __builtin_object_size implementation currently treats Type=0 and |
| // Type=2 identically. Encoding this implementation detail here may make |
| // improving __builtin_object_size difficult in the future, so it's omitted. |
| return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
| } |
| |
| static llvm::Value * |
| getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
| return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
| } |
| |
| llvm::Value * |
| CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType, |
| llvm::Value *EmittedE) { |
| uint64_t ObjectSize; |
| if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
| return emitBuiltinObjectSize(E, Type, ResType, EmittedE); |
| return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
| } |
| |
| /// Returns a Value corresponding to the size of the given expression. |
| /// This Value may be either of the following: |
| /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
| /// it) |
| /// - A call to the @llvm.objectsize intrinsic |
| /// |
| /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
| /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
| /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
| llvm::Value * |
| CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType, |
| llvm::Value *EmittedE) { |
| // We need to reference an argument if the pointer is a parameter with the |
| // pass_object_size attribute. |
| if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
| auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
| auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
| if (Param != nullptr && PS != nullptr && |
| areBOSTypesCompatible(PS->getType(), Type)) { |
| auto Iter = SizeArguments.find(Param); |
| assert(Iter != SizeArguments.end()); |
| |
| const ImplicitParamDecl *D = Iter->second; |
| auto DIter = LocalDeclMap.find(D); |
| assert(DIter != LocalDeclMap.end()); |
| |
| return EmitLoadOfScalar(DIter->second, /*volatile=*/false, |
| getContext().getSizeType(), E->getLocStart()); |
| } |
| } |
| |
| // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
| // evaluate E for side-effects. In either case, we shouldn't lower to |
| // @llvm.objectsize. |
| if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
| return getDefaultBuiltinObjectSizeResult(Type, ResType); |
| |
| Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
| assert(Ptr->getType()->isPointerTy() && |
| "Non-pointer passed to __builtin_object_size?"); |
| |
| Value *F = CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
| |
// LLVM only supports 0 and 2; make sure we pass that along as a boolean.
| Value *Min = Builder.getInt1((Type & 2) != 0); |
// For GCC compatibility, __builtin_object_size treats NULL as unknown size.
| Value *NullIsUnknown = Builder.getTrue(); |
| return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown}); |
| } |
| |
// Many of the MSVC builtins are available on both x64 and ARM; to avoid
// repeating code, we handle them here.
| enum class CodeGenFunction::MSVCIntrin { |
| _BitScanForward, |
| _BitScanReverse, |
| _InterlockedAnd, |
| _InterlockedDecrement, |
| _InterlockedExchange, |
| _InterlockedExchangeAdd, |
| _InterlockedExchangeSub, |
| _InterlockedIncrement, |
| _InterlockedOr, |
| _InterlockedXor, |
| _interlockedbittestandset, |
| __fastfail, |
| }; |
| |
| Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
| const CallExpr *E) { |
| switch (BuiltinID) { |
| case MSVCIntrin::_BitScanForward: |
| case MSVCIntrin::_BitScanReverse: { |
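// Descriptive note (added): both intrinsics store the index of the
// lowest (_BitScanForward) or highest (_BitScanReverse) set bit through
// arg 0 and return 1; when the input is zero they return 0 without
// storing an index. The control flow below builds exactly that via a PHI.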
| Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| llvm::Type *IndexType = |
| EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType(); |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| |
| Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
| Value *ResZero = llvm::Constant::getNullValue(ResultType); |
| Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
| |
| BasicBlock *Begin = Builder.GetInsertBlock(); |
| BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
| Builder.SetInsertPoint(End); |
| PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
| |
| Builder.SetInsertPoint(Begin); |
| Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
| BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
| Builder.CreateCondBr(IsZero, End, NotZero); |
| Result->addIncoming(ResZero, Begin); |
| |
| Builder.SetInsertPoint(NotZero); |
| Address IndexAddress = EmitPointerWithAlignment(E->getArg(0)); |
| |
| if (BuiltinID == MSVCIntrin::_BitScanForward) { |
| Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
| ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
| Builder.CreateStore(ZeroCount, IndexAddress, false); |
| } else { |
| unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
| Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
| |
| Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
| ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
| Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
| Builder.CreateStore(Index, IndexAddress, false); |
| } |
| Builder.CreateBr(End); |
| Result->addIncoming(ResOne, NotZero); |
| |
| Builder.SetInsertPoint(End); |
| return Result; |
| } |
| case MSVCIntrin::_InterlockedAnd: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
| case MSVCIntrin::_InterlockedExchange: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
| case MSVCIntrin::_InterlockedExchangeAdd: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
| case MSVCIntrin::_InterlockedExchangeSub: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
| case MSVCIntrin::_InterlockedOr: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
| case MSVCIntrin::_InterlockedXor: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
| |
| case MSVCIntrin::_interlockedbittestandset: { |
| llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Bit = EmitScalarExpr(E->getArg(1)); |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Or, Addr, |
| Builder.CreateShl(ConstantInt::get(Bit->getType(), 1), Bit), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| // Shift the relevant bit to the least significant position, truncate to |
| // the result type, and test the low bit. |
| llvm::Value *Shifted = Builder.CreateLShr(RMWI, Bit); |
| llvm::Value *Truncated = |
| Builder.CreateTrunc(Shifted, ConvertType(E->getType())); |
| return Builder.CreateAnd(Truncated, |
| ConstantInt::get(Truncated->getType(), 1)); |
| } |
| |
| case MSVCIntrin::_InterlockedDecrement: { |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Sub, |
| EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)); |
| } |
| case MSVCIntrin::_InterlockedIncrement: { |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Add, |
| EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)); |
| } |
| |
| case MSVCIntrin::__fastfail: { |
| // Request immediate process termination from the kernel. The instruction |
| // sequences to do this are documented on MSDN: |
| // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
| llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
| StringRef Asm, Constraints; |
| switch (ISA) { |
| default: |
| ErrorUnsupported(E, "__fastfail call for this architecture"); |
| break; |
| case llvm::Triple::x86: |
| case llvm::Triple::x86_64: |
| Asm = "int $$0x29"; |
| Constraints = "{cx}"; |
| break; |
| case llvm::Triple::thumb: |
| Asm = "udf #251"; |
| Constraints = "{r0}"; |
| break; |
| } |
| llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
| llvm::InlineAsm *IA = |
| llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true); |
| llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
| getLLVMContext(), llvm::AttributeList::FunctionIndex, |
| llvm::Attribute::NoReturn); |
| CallSite CS = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
| CS.setAttributes(NoReturnAttr); |
| return CS.getInstruction(); |
| } |
| } |
| llvm_unreachable("Incorrect MSVC intrinsic!"); |
| } |
| |
| namespace { |
| // ARC cleanup for __builtin_os_log_format |
| struct CallObjCArcUse final : EHScopeStack::Cleanup { |
| CallObjCArcUse(llvm::Value *object) : object(object) {} |
| llvm::Value *object; |
| |
| void Emit(CodeGenFunction &CGF, Flags flags) override { |
| CGF.EmitARCIntrinsicUse(object); |
| } |
| }; |
| } |
| |
| Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
| BuiltinCheckKind Kind) { |
| assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) |
| && "Unsupported builtin check kind"); |
| |
| Value *ArgValue = EmitScalarExpr(E); |
| if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
| return ArgValue; |
| |
| SanitizerScope SanScope(this); |
| Value *Cond = Builder.CreateICmpNE( |
| ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
| EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
| SanitizerHandler::InvalidBuiltin, |
| {EmitCheckSourceLocation(E->getExprLoc()), |
| llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
| None); |
| return ArgValue; |
| } |
| |
| /// Get the argument type for arguments to os_log_helper. |
| static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
| QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); |
| return C.getCanonicalType(UnsignedTy); |
| } |
| |
| llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
| const analyze_os_log::OSLogBufferLayout &Layout, |
| CharUnits BufferAlignment) { |
| ASTContext &Ctx = getContext(); |
| |
| llvm::SmallString<64> Name; |
| { |
| raw_svector_ostream OS(Name); |
| OS << "__os_log_helper"; |
| OS << "_" << BufferAlignment.getQuantity(); |
| OS << "_" << int(Layout.getSummaryByte()); |
| OS << "_" << int(Layout.getNumArgsByte()); |
| for (const auto &Item : Layout.Items) |
| OS << "_" << int(Item.getSizeByte()) << "_" |
| << int(Item.getDescriptorByte()); |
| } |
| |
| if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
| return F; |
| |
| llvm::SmallVector<ImplicitParamDecl, 4> Params; |
| Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), |
| Ctx.VoidPtrTy, ImplicitParamDecl::Other); |
| |
| for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
| char Size = Layout.Items[I].getSizeByte(); |
| if (!Size) |
| continue; |
| |
| Params.emplace_back( |
| Ctx, nullptr, SourceLocation(), |
| &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), |
| getOSLogArgType(Ctx, Size), ImplicitParamDecl::Other); |
| } |
| |
| FunctionArgList Args; |
| for (auto &P : Params) |
| Args.push_back(&P); |
| |
| // The helper function has linkonce_odr linkage to enable the linker to merge |
| // identical functions. To ensure the merging always happens, 'noinline' is |
| // attached to the function when compiling with -Oz. |
| const CGFunctionInfo &FI = |
| CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); |
| llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
| llvm::Function *Fn = llvm::Function::Create( |
| FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
| Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
| CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn); |
| CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
| |
| // Attach 'noinline' at -Oz. |
| if (CGM.getCodeGenOpts().OptimizeSize == 2) |
| Fn->addFnAttr(llvm::Attribute::NoInline); |
| |
| auto NL = ApplyDebugLocation::CreateEmpty(*this); |
| IdentifierInfo *II = &Ctx.Idents.get(Name); |
| FunctionDecl *FD = FunctionDecl::Create( |
| Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
| Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false); |
| |
| StartFunction(FD, Ctx.VoidTy, Fn, FI, Args); |
| |
| // Create a scope with an artificial location for the body of this function. |
| auto AL = ApplyDebugLocation::CreateArtificial(*this); |
| |
| CharUnits Offset; |
| Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(&Params[0]), "buf"), |
| BufferAlignment); |
| Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
| Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
| |
| unsigned I = 1; |
| for (const auto &Item : Layout.Items) { |
| Builder.CreateStore( |
| Builder.getInt8(Item.getDescriptorByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
| Builder.CreateStore( |
| Builder.getInt8(Item.getSizeByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
| |
| CharUnits Size = Item.size(); |
| if (!Size.getQuantity()) |
| continue; |
| |
| Address Arg = GetAddrOfLocalVar(&Params[I]); |
| Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
| Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), |
| "argDataCast"); |
| Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
| Offset += Size; |
| ++I; |
| } |
| |
| FinishFunction(); |
| |
| return Fn; |
| } |
| |
| RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
| assert(E.getNumArgs() >= 2 && |
| "__builtin_os_log_format takes at least 2 arguments"); |
| ASTContext &Ctx = getContext(); |
| analyze_os_log::OSLogBufferLayout Layout; |
| analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
| Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
| llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
| |
| // Ignore argument 1, the format string. It is not currently used. |
| CallArgList Args; |
| Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
| |
| for (const auto &Item : Layout.Items) { |
| int Size = Item.getSizeByte(); |
| if (!Size) |
| continue; |
| |
| llvm::Value *ArgVal; |
| |
| if (const Expr *TheExpr = Item.getExpr()) { |
| ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); |
| |
| // Check if this is a retainable type. |
| if (TheExpr->getType()->isObjCRetainableType()) { |
| assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && |
| "Only scalar can be a ObjC retainable type"); |
| // Check if the object is constant, if not, save it in |
| // RetainableOperands. |
| if (!isa<Constant>(ArgVal)) |
| RetainableOperands.push_back(ArgVal); |
| } |
| } else { |
| ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
| } |
| |
| unsigned ArgValSize = |
| CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
| llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
| ArgValSize); |
| ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
| CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
| // If ArgVal has type x86_fp80, zero-extend ArgVal. |
| ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
| Args.add(RValue::get(ArgVal), ArgTy); |
| } |
| |
| const CGFunctionInfo &FI = |
| CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
| llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
| Layout, BufAddr.getAlignment()); |
| EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
| |
| // Push a clang.arc.use cleanup for each object in RetainableOperands. The |
| // cleanup will cause the use to appear after the final log call, keeping |
// the object valid while it's held in the log buffer. Note that if there's
| // a release cleanup on the object, it will already be active; since |
| // cleanups are emitted in reverse order, the use will occur before the |
| // object is released. |
| if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount && |
| CGM.getCodeGenOpts().OptimizationLevel != 0) |
| for (llvm::Value *Object : RetainableOperands) |
| pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object); |
| |
| return RValue::get(BufAddr.getPointer()); |
| } |
| |
| /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
| static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
| WidthAndSignedness Op1Info, |
| WidthAndSignedness Op2Info, |
| WidthAndSignedness ResultInfo) { |
| return BuiltinID == Builtin::BI__builtin_mul_overflow && |
| Op1Info.Width == Op2Info.Width && Op1Info.Width >= ResultInfo.Width && |
| Op1Info.Signed != Op2Info.Signed; |
| } |
| |
| /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
| /// the generic checked-binop irgen. |
| static RValue |
| EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
| WidthAndSignedness Op1Info, const clang::Expr *Op2, |
| WidthAndSignedness Op2Info, |
| const clang::Expr *ResultArg, QualType ResultQTy, |
| WidthAndSignedness ResultInfo) { |
| assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info, |
| Op2Info, ResultInfo) && |
| "Not a mixed-sign multipliction we can specialize"); |
| |
| // Emit the signed and unsigned operands. |
| const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
| const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
| llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
| llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
| |
| llvm::Type *OpTy = Signed->getType(); |
| llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
| Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
| llvm::Type *ResTy = ResultPtr.getElementType(); |
| |
| // Take the absolute value of the signed operand. |
| llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
| llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
| llvm::Value *AbsSigned = |
| CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
| |
| // Perform a checked unsigned multiplication. |
| llvm::Value *UnsignedOverflow; |
| llvm::Value *UnsignedResult = |
| EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
| Unsigned, UnsignedOverflow); |
| |
| llvm::Value *Overflow, *Result; |
| if (ResultInfo.Signed) { |
// Signed overflow occurs if the result is greater than INT_MAX or less
// than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
| auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width) |
| .zextOrSelf(Op1Info.Width); |
| llvm::Value *MaxResult = |
| CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
| CGF.Builder.CreateZExt(IsNegative, OpTy)); |
| llvm::Value *SignedOverflow = |
| CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
| Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
| |
| // Prepare the signed result (possibly by negating it). |
| llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
| llvm::Value *SignedResult = |
| CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
| Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
| } else { |
| // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
| llvm::Value *Underflow = CGF.Builder.CreateAnd( |
| IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
| Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
| if (ResultInfo.Width < Op1Info.Width) { |
| auto IntMax = |
| llvm::APInt::getMaxValue(ResultInfo.Width).zext(Op1Info.Width); |
| llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
| UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
| Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
| } |
| |
| // Negate the product if it would be negative in infinite precision. |
| Result = CGF.Builder.CreateSelect( |
| IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
| |
| Result = CGF.Builder.CreateTrunc(Result, ResTy); |
| } |
| assert(Overflow && Result && "Missing overflow or result"); |
| |
| bool isVolatile = |
| ResultArg->getType()->getPointeeType().isVolatileQualified(); |
| CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
| isVolatile); |
| return RValue::get(Overflow); |
| } |
| |
| RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, |
| unsigned BuiltinID, const CallExpr *E, |
| ReturnValueSlot ReturnValue) { |
| // See if we can constant fold this builtin. If so, don't emit it at all. |
| Expr::EvalResult Result; |
| if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
| !Result.hasSideEffects()) { |
| if (Result.Val.isInt()) |
| return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
| Result.Val.getInt())); |
| if (Result.Val.isFloat()) |
| return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
| Result.Val.getFloat())); |
| } |
| |
// There are LLVM math intrinsics/instructions corresponding to math library
// functions, except that the LLVM op will never set errno while the math
// library might. Also, math builtins have the same semantics as their math
// library twins. Thus, we can transform math library and builtin calls to
// their LLVM counterparts if the call is marked 'const' (known to never set
// errno).
| if (FD->hasAttr<ConstAttr>()) { |
| switch (BuiltinID) { |
| case Builtin::BIceil: |
| case Builtin::BIceilf: |
| case Builtin::BIceill: |
| case Builtin::BI__builtin_ceil: |
| case Builtin::BI__builtin_ceilf: |
| case Builtin::BI__builtin_ceill: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil)); |
| |
| case Builtin::BIcopysign: |
| case Builtin::BIcopysignf: |
| case Builtin::BIcopysignl: |
| case Builtin::BI__builtin_copysign: |
| case Builtin::BI__builtin_copysignf: |
| case Builtin::BI__builtin_copysignl: |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
| |
| case Builtin::BIcos: |
| case Builtin::BIcosf: |
| case Builtin::BIcosl: |
| case Builtin::BI__builtin_cos: |
| case Builtin::BI__builtin_cosf: |
| case Builtin::BI__builtin_cosl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos)); |
| |
| case Builtin::BIexp: |
| case Builtin::BIexpf: |
| case Builtin::BIexpl: |
| case Builtin::BI__builtin_exp: |
| case Builtin::BI__builtin_expf: |
| case Builtin::BI__builtin_expl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp)); |
| |
| case Builtin::BIexp2: |
| case Builtin::BIexp2f: |
| case Builtin::BIexp2l: |
| case Builtin::BI__builtin_exp2: |
| case Builtin::BI__builtin_exp2f: |
| case Builtin::BI__builtin_exp2l: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2)); |
| |
| case Builtin::BIfabs: |
| case Builtin::BIfabsf: |
| case Builtin::BIfabsl: |
| case Builtin::BI__builtin_fabs: |
| case Builtin::BI__builtin_fabsf: |
| case Builtin::BI__builtin_fabsl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
| |
| case Builtin::BIfloor: |
| case Builtin::BIfloorf: |
| case Builtin::BIfloorl: |
| case Builtin::BI__builtin_floor: |
| case Builtin::BI__builtin_floorf: |
| case Builtin::BI__builtin_floorl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor)); |
| |
| case Builtin::BIfma: |
| case Builtin::BIfmaf: |
| case Builtin::BIfmal: |
| case Builtin::BI__builtin_fma: |
| case Builtin::BI__builtin_fmaf: |
| case Builtin::BI__builtin_fmal: |
| return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma)); |
| |
| case Builtin::BIfmax: |
| case Builtin::BIfmaxf: |
| case Builtin::BIfmaxl: |
| case Builtin::BI__builtin_fmax: |
| case Builtin::BI__builtin_fmaxf: |
| case Builtin::BI__builtin_fmaxl: |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum)); |
| |
| case Builtin::BIfmin: |
| case Builtin::BIfminf: |
| case Builtin::BIfminl: |
| case Builtin::BI__builtin_fmin: |
| case Builtin::BI__builtin_fminf: |
| case Builtin::BI__builtin_fminl: |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum)); |
| |
// fmod() is a special case. It maps to the frem instruction rather than an
| // LLVM intrinsic. |
| case Builtin::BIfmod: |
| case Builtin::BIfmodf: |
| case Builtin::BIfmodl: |
| case Builtin::BI__builtin_fmod: |
| case Builtin::BI__builtin_fmodf: |
| case Builtin::BI__builtin_fmodl: { |
| Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
| Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
| return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
| } |
| |
| case Builtin::BIlog: |
| case Builtin::BIlogf: |
| case Builtin::BIlogl: |
| case Builtin::BI__builtin_log: |
| case Builtin::BI__builtin_logf: |
| case Builtin::BI__builtin_logl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log)); |
| |
| case Builtin::BIlog10: |
| case Builtin::BIlog10f: |
| case Builtin::BIlog10l: |
| case Builtin::BI__builtin_log10: |
| case Builtin::BI__builtin_log10f: |
| case Builtin::BI__builtin_log10l: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10)); |
| |
| case Builtin::BIlog2: |
| case Builtin::BIlog2f: |
| case Builtin::BIlog2l: |
| case Builtin::BI__builtin_log2: |
| case Builtin::BI__builtin_log2f: |
| case Builtin::BI__builtin_log2l: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2)); |
| |
| case Builtin::BInearbyint: |
| case Builtin::BInearbyintf: |
| case Builtin::BInearbyintl: |
| case Builtin::BI__builtin_nearbyint: |
| case Builtin::BI__builtin_nearbyintf: |
| case Builtin::BI__builtin_nearbyintl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint)); |
| |
| case Builtin::BIpow: |
| case Builtin::BIpowf: |
| case Builtin::BIpowl: |
| case Builtin::BI__builtin_pow: |
| case Builtin::BI__builtin_powf: |
| case Builtin::BI__builtin_powl: |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow)); |
| |
| case Builtin::BIrint: |
| case Builtin::BIrintf: |
| case Builtin::BIrintl: |
| case Builtin::BI__builtin_rint: |
| case Builtin::BI__builtin_rintf: |
| case Builtin::BI__builtin_rintl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint)); |
| |
| case Builtin::BIround: |
| case Builtin::BIroundf: |
| case Builtin::BIroundl: |
| case Builtin::BI__builtin_round: |
| case Builtin::BI__builtin_roundf: |
| case Builtin::BI__builtin_roundl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round)); |
| |
| case Builtin::BIsin: |
| case Builtin::BIsinf: |
| case Builtin::BIsinl: |
| case Builtin::BI__builtin_sin: |
| case Builtin::BI__builtin_sinf: |
| case Builtin::BI__builtin_sinl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin)); |
| |
| case Builtin::BIsqrt: |
| case Builtin::BIsqrtf: |
| case Builtin::BIsqrtl: |
| case Builtin::BI__builtin_sqrt: |
| case Builtin::BI__builtin_sqrtf: |
| case Builtin::BI__builtin_sqrtl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt)); |
| |
| case Builtin::BItrunc: |
| case Builtin::BItruncf: |
| case Builtin::BItruncl: |
| case Builtin::BI__builtin_trunc: |
| case Builtin::BI__builtin_truncf: |
| case Builtin::BI__builtin_truncl: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc)); |
| |
| default: |
| break; |
| } |
| } |
| |
| switch (BuiltinID) { |
| default: break; |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| case Builtin::BI__builtin___NSStringMakeConstantString: |
| return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| case Builtin::BI__va_start: |
| case Builtin::BI__builtin_va_end: |
| return RValue::get( |
| EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
| ? EmitScalarExpr(E->getArg(0)) |
| : EmitVAListRef(E->getArg(0)).getPointer(), |
| BuiltinID != Builtin::BI__builtin_va_end)); |
| case Builtin::BI__builtin_va_copy: { |
| Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
| Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
| |
| llvm::Type *Type = Int8PtrTy; |
| |
| DstPtr = Builder.CreateBitCast(DstPtr, Type); |
| SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
| return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
| {DstPtr, SrcPtr})); |
| } |
| case Builtin::BI__builtin_abs: |
| case Builtin::BI__builtin_labs: |
| case Builtin::BI__builtin_llabs: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| Value *NegOp = Builder.CreateNeg(ArgValue, "neg"); |
| Value *CmpResult = |
| Builder.CreateICmpSGE(ArgValue, |
| llvm::Constant::getNullValue(ArgValue->getType()), |
| "abscond"); |
| Value *Result = |
| Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs"); |
| |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_conj: |
| case Builtin::BI__builtin_conjf: |
| case Builtin::BI__builtin_conjl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| Value *Real = ComplexVal.first; |
| Value *Imag = ComplexVal.second; |
| Value *Zero = |
| Imag->getType()->isFPOrFPVectorTy() |
| ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) |
| : llvm::Constant::getNullValue(Imag->getType()); |
| |
| Imag = Builder.CreateFSub(Zero, Imag, "sub"); |
| return RValue::getComplex(std::make_pair(Real, Imag)); |
| } |
| case Builtin::BI__builtin_creal: |
| case Builtin::BI__builtin_crealf: |
| case Builtin::BI__builtin_creall: |
| case Builtin::BIcreal: |
| case Builtin::BIcrealf: |
| case Builtin::BIcreall: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.first); |
| } |
| |
| case Builtin::BI__builtin_cimag: |
| case Builtin::BI__builtin_cimagf: |
| case Builtin::BI__builtin_cimagl: |
| case Builtin::BIcimag: |
| case Builtin::BIcimagf: |
| case Builtin::BIcimagl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.second); |
| } |
| |
| case Builtin::BI__builtin_ctzs: |
| case Builtin::BI__builtin_ctz: |
| case Builtin::BI__builtin_ctzl: |
| case Builtin::BI__builtin_ctzll: { |
| Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_clzs: |
| case Builtin::BI__builtin_clz: |
| case Builtin::BI__builtin_clzl: |
| case Builtin::BI__builtin_clzll: { |
| Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_ffs: |
| case Builtin::BI__builtin_ffsl: |
| case Builtin::BI__builtin_ffsll: { |
| // ffs(x) -> x ? cttz(x) + 1 : 0 |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = |
| Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
| llvm::ConstantInt::get(ArgType, 1)); |
| Value *Zero = llvm::Constant::getNullValue(ArgType); |
| Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
| Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_parity: |
| case Builtin::BI__builtin_parityl: |
| case Builtin::BI__builtin_parityll: { |
| // parity(x) -> ctpop(x) & 1 |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = Builder.CreateCall(F, ArgValue); |
| Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__popcnt16: |
| case Builtin::BI__popcnt: |
| case Builtin::BI__popcnt64: |
| case Builtin::BI__builtin_popcount: |
| case Builtin::BI__builtin_popcountl: |
| case Builtin::BI__builtin_popcountll: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Result = Builder.CreateCall(F, ArgValue); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI_rotr8: |
| case Builtin::BI_rotr16: |
| case Builtin::BI_rotr: |
| case Builtin::BI_lrotr: |
| case Builtin::BI_rotr64: { |
| Value *Val = EmitScalarExpr(E->getArg(0)); |
| Value *Shift = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::Type *ArgType = Val->getType(); |
| Shift = Builder.CreateIntCast(Shift, ArgType, false); |
| unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
| Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth); |
| Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
| |
| Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1); |
| Shift = Builder.CreateAnd(Shift, Mask); |
| Value *LeftShift = Builder.CreateSub(ArgTypeSize, Shift); |
| |
| Value *RightShifted = Builder.CreateLShr(Val, Shift); |
| Value *LeftShifted = Builder.CreateShl(Val, LeftShift); |
| Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted); |
| |
| Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero); |
| Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated); |
| return RValue::get(Result); |
| } |
| case Builtin::BI_rotl8: |
| case Builtin::BI_rotl16: |
| case Builtin::BI_rotl: |
| case Builtin::BI_lrotl: |
| case Builtin::BI_rotl64: { |
| Value *Val = EmitScalarExpr(E->getArg(0)); |
| Value *Shift = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::Type *ArgType = Val->getType(); |
| Shift = Builder.CreateIntCast(Shift, ArgType, false); |
| unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
| Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth); |
| Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
| |
| Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1); |
| Shift = Builder.CreateAnd(Shift, Mask); |
| Value *RightShift = Builder.CreateSub(ArgTypeSize, Shift); |
| |
| Value *LeftShifted = Builder.CreateShl(Val, Shift); |
| Value *RightShifted = Builder.CreateLShr(Val, RightShift); |
| Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted); |
| |
| Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero); |
| Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_unpredictable: { |
| // Always return the argument of __builtin_unpredictable. LLVM does not |
| // handle this builtin. Metadata for this builtin should be added directly |
| // to instructions such as branches or switches that use it. |
| return RValue::get(EmitScalarExpr(E->getArg(0))); |
| } |
| case Builtin::BI__builtin_expect: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = ArgValue->getType(); |
| |
| Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
| // Don't generate llvm.expect on -O0 as the backend won't use it for |
| // anything. |
// Note that we still IRGen ExpectedValue because it could have side-effects.
| if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
| return RValue::get(ArgValue); |
| |
| Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
| Value *Result = |
| Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_assume_aligned: { |
| Value *PtrValue = EmitScalarExpr(E->getArg(0)); |
| Value *OffsetValue = |
| (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
| |
| Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
| ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
| unsigned Alignment = (unsigned) AlignmentCI->getZExtValue(); |
| |
| EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue); |
| return RValue::get(PtrValue); |
| } |
| case Builtin::BI__assume: |
| case Builtin::BI__builtin_assume: { |
| if (E->getArg(0)->HasSideEffects(getContext())) |
| return RValue::get(nullptr); |
| |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
| return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
| } |
| case Builtin::BI__builtin_bswap16: |
| case Builtin::BI__builtin_bswap32: |
| case Builtin::BI__builtin_bswap64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
| } |
| case Builtin::BI__builtin_bitreverse8: |
| case Builtin::BI__builtin_bitreverse16: |
| case Builtin::BI__builtin_bitreverse32: |
| case Builtin::BI__builtin_bitreverse64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
| } |
| case Builtin::BI__builtin_object_size: { |
| unsigned Type = |
| E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
| auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
| |
| // We pass this builtin onto the optimizer so that it can figure out the |
| // object size in more complex cases. |
| return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
| /*EmittedE=*/nullptr)); |
| } |
| case Builtin::BI__builtin_prefetch: { |
| Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
// FIXME: Technically these constants should be of type 'int', yes?
| RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
| llvm::ConstantInt::get(Int32Ty, 0); |
| Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
| llvm::ConstantInt::get(Int32Ty, 3); |
| Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
| Value *F = CGM.getIntrinsic(Intrinsic::prefetch); |
| return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
| } |
| case Builtin::BI__builtin_readcyclecounter: { |
| Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
| return RValue::get(Builder.CreateCall(F)); |
| } |
| case Builtin::BI__builtin___clear_cache: { |
| Value *Begin = EmitScalarExpr(E->getArg(0)); |
| Value *End = EmitScalarExpr(E->getArg(1)); |
| Value *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
| return RValue::get(Builder.CreateCall(F, {Begin, End})); |
| } |
| case Builtin::BI__builtin_trap: |
| return RValue::get(EmitTrapCall(Intrinsic::trap)); |
| case Builtin::BI__debugbreak: |
| return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
| case Builtin::BI__builtin_unreachable: { |
| EmitUnreachable(E->getExprLoc()); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("unreachable.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_powi: |
| case Builtin::BI__builtin_powif: |
| case Builtin::BI__builtin_powil: { |
| Value *Base = EmitScalarExpr(E->getArg(0)); |
| Value *Exponent = EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ArgType = Base->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); |
| return RValue::get(Builder.CreateCall(F, {Base, Exponent})); |
| } |
| |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: { |
| // Ordered comparisons: we know the arguments to these are matching scalar |
| // floating-point values. |
| Value *LHS = EmitScalarExpr(E->getArg(0)); |
| Value *RHS = EmitScalarExpr(E->getArg(1)); |
| |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown ordered comparison"); |
| case Builtin::BI__builtin_isgreater: |
| LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isgreaterequal: |
| LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isless: |
| LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessequal: |
| LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessgreater: |
| LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isunordered: |
| LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
| break; |
| } |
| // ZExt bool to int type. |
| return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_isnan: { |
| Value *V = EmitScalarExpr(E->getArg(0)); |
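| // NaN is the only value that compares unordered with itself, so the |
| // self-compare below is true exactly when V is a NaN. |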
| V = Builder.CreateFCmpUNO(V, V, "cmp"); |
| return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BIfinite: |
| case Builtin::BI__finite: |
| case Builtin::BIfinitef: |
| case Builtin::BI__finitef: |
| case Builtin::BIfinitel: |
| case Builtin::BI__finitel: |
| case Builtin::BI__builtin_isinf: |
| case Builtin::BI__builtin_isfinite: { |
| // isinf(x)    --> fabs(x) == infinity |
| // isfinite(x) --> fabs(x) != infinity |
| // In both cases the ordered compare is false when x is NaN, so NaN inputs |
| // yield the correct (false) result. |
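| // E.g. (illustrative), __builtin_isinf(x) for double emits roughly: |
| //   %fabs = call double @llvm.fabs.f64(double %x) |
| //   %cmpinf = fcmp oeq double %fabs, 0x7FF0000000000000 |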
| Value *V = EmitScalarExpr(E->getArg(0)); |
| Value *Fabs = EmitFAbs(*this, V); |
| Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
| CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
| ? CmpInst::FCMP_OEQ |
| : CmpInst::FCMP_ONE; |
| Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
| return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_isinf_sign: { |
| // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
| Value *Arg = EmitScalarExpr(E->getArg(0)); |
| Value *AbsArg = EmitFAbs(*this, Arg); |
| Value *IsInf = Builder.CreateFCmpOEQ( |
| AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
| Value *IsNeg = EmitSignBit(*this, Arg); |
| |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| Value *Zero = Constant::getNullValue(IntTy); |
| Value *One = ConstantInt::get(IntTy, 1); |
| Value *NegativeOne = ConstantInt::get(IntTy, -1); |
| Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
| Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
| return RValue::get(Result); |
| } |
| |
| case Builtin::BI__builtin_isnormal: { |
| // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min |
| Value *V = EmitScalarExpr(E->getArg(0)); |
| Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
| |
| Value *Abs = EmitFAbs(*this, V); |
| Value *IsLessThanInf = |
| Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf"); |
| APFloat Smallest = APFloat::getSmallestNormalized( |
| getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
| Value *IsNormal = |
| Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
| "isnormal"); |
| V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
| V = Builder.CreateAnd(V, IsNormal, "and"); |
| return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_fpclassify: { |
| Value *V = EmitScalarExpr(E->getArg(5)); |
| llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
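| |
| // Conceptually (illustrative), the emitted control flow computes: |
| //   V == 0              ? FP_ZERO     : |
| //   V != V              ? FP_NAN      : |
| //   fabs(V) == infinity ? FP_INFINITE : |
| //   fabs(V) >= min_norm ? FP_NORMAL   : FP_SUBNORMAL |
| // where the FP_* values are the caller-supplied arguments 4, 0, 1, 2, 3. |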
| |
| // Create the result PHI up front in the merge block. |
| BasicBlock *Begin = Builder.GetInsertBlock(); |
| BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
| Builder.SetInsertPoint(End); |
| PHINode *Result = |
| Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
| "fpclassify_result"); |
| |
| // if (V==0) return FP_ZERO |
| Builder.SetInsertPoint(Begin); |
| Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
| "iszero"); |
| Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
| BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
| Builder.CreateCondBr(IsZero, End, NotZero); |
| Result->addIncoming(ZeroLiteral, Begin); |
| |
| // if (V != V) return FP_NAN |
| Builder.SetInsertPoint(NotZero); |
| Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
| Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
| BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
| Builder.CreateCondBr(IsNan, End, NotNan); |
| Result->addIncoming(NanLiteral, NotZero); |
| |
| // if (fabs(V) == infinity) return FP_INFINITY |
| Builder.SetInsertPoint(NotNan); |
| Value *VAbs = EmitFAbs(*this, V); |
| Value *IsInf = |
| Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
| "isinf"); |
| Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
| BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
| Builder.CreateCondBr(IsInf, End, NotInf); |
| Result->addIncoming(InfLiteral, NotNan); |
| |
| // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
| Builder.SetInsertPoint(NotInf); |
| APFloat Smallest = APFloat::getSmallestNormalized( |
| getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
| Value *IsNormal = |
| Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
| "isnormal"); |
| Value *NormalResult = |
| Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
| EmitScalarExpr(E->getArg(3))); |
| Builder.CreateBr(End); |
| Result->addIncoming(NormalResult, NotInf); |
| |
| // return Result |
| Builder.SetInsertPoint(End); |
| return RValue::get(Result); |
| } |
| |
| case Builtin::BIalloca: |
| case Builtin::BI_alloca: |
| case Builtin::BI__builtin_alloca: { |
| Value *Size = EmitScalarExpr(E->getArg(0)); |
| const TargetInfo &TI = getContext().getTargetInfo(); |
| // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. |
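| // E.g. (illustrative), on a target where that is 16 bytes, |
| // __builtin_alloca(n) becomes: |
| //   %1 = alloca i8, i64 %n, align 16 |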
| unsigned SuitableAlignmentInBytes = |
| CGM.getContext() |
| .toCharUnitsFromBits(TI.getSuitableAlign()) |
| .getQuantity(); |
| AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
| AI->setAlignment(SuitableAlignmentInBytes); |
| return RValue::get(AI); |
| } |
| |
| case Builtin::BI__builtin_alloca_with_align: { |
| Value *Size = EmitScalarExpr(E->getArg(0)); |
| Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); |
| auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); |
| unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
| unsigned AlignmentInBytes = |
| CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity(); |
| AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
| AI->setAlignment(AlignmentInBytes); |
| return RValue::get(AI); |
| } |
| |
| case Builtin::BIbzero: |
| case Builtin::BI__builtin_bzero: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
| return RValue::get(nullptr); |
| } |
| case Builtin::BImemcpy: |
| case Builtin::BI__builtin_memcpy: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
| E->getArg(1)->getExprLoc(), FD, 1); |
| Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| |
| case Builtin::BI__builtin_char_memchr: |
| BuiltinID = Builtin::BI__builtin_memchr; |
| break; |
| |
| case Builtin::BI__builtin___memcpy_chk: { |
| // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
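| // E.g. (illustrative), __builtin___memcpy_chk(d, s, 8, 16) folds to a |
| // plain memcpy because 8 <= 16; a non-constant or oversized count breaks |
| // out to the default library-call handling instead. |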
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| |
| case Builtin::BI__builtin_objc_memmove_collectable: { |
| Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
| Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
| DestAddr, SrcAddr, SizeVal); |
| return RValue::get(DestAddr.getPointer()); |
| } |
| |
| case Builtin::BI__builtin___memmove_chk: { |
| // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. |
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemMove(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| |
| case Builtin::BImemmove: |
| case Builtin::BI__builtin_memmove: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
| E->getArg(1)->getExprLoc(), FD, 1); |
| Builder.CreateMemMove(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BImemset: |
| case Builtin::BI__builtin_memset: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
| Builder.getInt8Ty()); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BI__builtin___memset_chk: { |
| // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. |
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
| Builder.getInt8Ty()); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BI__builtin_dwarf_cfa: { |
| // The offset in bytes from the first argument to the CFA. |
| // |
| // Why on earth is this in the frontend? Is there any reason at |
| // all that the backend can't reasonably determine this while |
| // lowering llvm.eh.dwarf.cfa()? |
| // |
| // TODO: If there's a satisfactory reason, add a target hook for |
| // this instead of hard-coding 0, which is correct for most targets. |
| int32_t Offset = 0; |
| |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
| return RValue::get(Builder.CreateCall(F, |
| llvm::ConstantInt::get(Int32Ty, Offset))); |
| } |
| case Builtin::BI__builtin_return_address: { |
| Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
| getContext().UnsignedIntTy); |
| Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
| return RValue::get(Builder.CreateCall(F, Depth)); |
| } |
| case Builtin::BI_ReturnAddress: { |
| Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
| return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); |
| } |
| case Builtin::BI__builtin_frame_address: { |
| Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
| getContext().UnsignedIntTy); |
| Value *F = CGM.getIntrinsic(Intrinsic::frameaddress); |
| return RValue::get(Builder.CreateCall(F, Depth)); |
| } |
| case Builtin::BI__builtin_extract_return_addr: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_frob_return_addr: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_dwarf_sp_column: { |
| llvm::IntegerType *Ty |
| = cast<llvm::IntegerType>(ConvertType(E->getType())); |
| int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
| if (Column == -1) { |
| CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
| return RValue::get(llvm::UndefValue::get(Ty)); |
| } |
| return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
| } |
| case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
| CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
| return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_eh_return: { |
| Value *Int = EmitScalarExpr(E->getArg(0)); |
| Value *Ptr = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
| assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && |
| "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); |
| Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32 |
| ? Intrinsic::eh_return_i32 |
| : Intrinsic::eh_return_i64); |
| Builder.CreateCall(F, {Int, Ptr}); |
| Builder.CreateUnreachable(); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| case Builtin::BI__builtin_unwind_init: { |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
| return RValue::get(Builder.CreateCall(F)); |
| } |
| case Builtin::BI__builtin_extend_pointer: { |
| // Extends a pointer to the size of an _Unwind_Word, which is |
| // uint64_t on all platforms. Generally this gets poked into a |
| // register and eventually used as an address, so if the |
| // addressing registers are wider than pointers and the platform |
| // doesn't implicitly ignore high-order bits when doing |
| // addressing, we need to make sure we zext / sext based on |
| // the platform's expectations. |
| // |
| // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
| |
| // Cast the pointer to intptr_t. |
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
| |
| // If that's 64 bits, we're done. |
| if (IntPtrTy->getBitWidth() == 64) |
| return RValue::get(Result); |
| |
| // Otherwise, ask the target hooks what to do. |
| if (getTargetHooks().extendPointerWithSExt()) |
| return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
| else |
| return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
| } |
| case Builtin::BI__builtin_setjmp: { |
| // Buffer is a void**. |
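| // Buffer layout used by LLVM's EH setjmp/longjmp (illustrative): |
| //   buf[0] = frame address (stored here), |
| //   buf[1] = destination address for longjmp (filled in by the backend), |
| //   buf[2] = saved stack pointer (stored here). |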
| Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
| |
| // Store the frame pointer to the setjmp buffer. |
| Value *FrameAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), |
| ConstantInt::get(Int32Ty, 0)); |
| Builder.CreateStore(FrameAddr, Buf); |
| |
| // Store the stack pointer to the setjmp buffer. |
| Value *StackAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
| Address StackSaveSlot = |
| Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize()); |
| Builder.CreateStore(StackAddr, StackSaveSlot); |
| |
| // Call LLVM's EH setjmp, which is lightweight. |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
| Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
| return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
| } |
| case Builtin::BI__builtin_longjmp: { |
| Value *Buf = EmitScalarExpr(E->getArg(0)); |
| Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
| |
| // Call LLVM's EH longjmp, which is lightweight. |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
| |
| // longjmp doesn't return; mark this as unreachable. |
| Builder.CreateUnreachable(); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("longjmp.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_nand: |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_nand_and_fetch: |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_swap: |
| llvm_unreachable("Shouldn't make it through sema"); |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
| case Builtin::BI__sync_fetch_and_nand_1: |
| case Builtin::BI__sync_fetch_and_nand_2: |
| case Builtin::BI__sync_fetch_and_nand_4: |
| case Builtin::BI__sync_fetch_and_nand_8: |
| case Builtin::BI__sync_fetch_and_nand_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
| |
| // Clang extensions: not overloaded yet. |
| case Builtin::BI__sync_fetch_and_min: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
| case Builtin::BI__sync_fetch_and_max: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
| case Builtin::BI__sync_fetch_and_umin: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
| case Builtin::BI__sync_fetch_and_umax: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
| |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
| llvm::Instruction::Add); |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
| llvm::Instruction::Sub); |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
| llvm::Instruction::And); |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
| llvm::Instruction::Or); |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
| llvm::Instruction::Xor); |
| case Builtin::BI__sync_nand_and_fetch_1: |
| case Builtin::BI__sync_nand_and_fetch_2: |
| case Builtin::BI__sync_nand_and_fetch_4: |
| case Builtin::BI__sync_nand_and_fetch_8: |
| case Builtin::BI__sync_nand_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
| llvm::Instruction::And, true); |
| |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
| |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
| |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
| |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
| |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: { |
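| // Illustrative lowering: an atomic store of zero, sized to the pointee |
| // type, with release ordering: |
| //   store atomic i32 0, i32* %ptr release |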
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
| CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
| llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
| StoreSize.getQuantity() * 8); |
| Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
| llvm::StoreInst *Store = |
| Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
| StoreSize); |
| Store->setAtomic(llvm::AtomicOrdering::Release); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__sync_synchronize: { |
| // We assume this is supposed to correspond to a C++11-style |
| // sequentially-consistent fence (i.e. this is only usable for |
| // synchronization, not device I/O or anything like that). This intrinsic |
| // is really badly designed in the sense that in theory, there isn't |
| // any way to safely use it... but in practice, it mostly works |
| // to use it with non-atomic loads and stores to get acquire/release |
| // semantics. |
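| // i.e. (illustrative), this emits the single instruction: |
| //   fence seq_cst |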
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_nontemporal_load: |
| return RValue::get(EmitNontemporalLoad(*this, E)); |
| case Builtin::BI__builtin_nontemporal_store: |
| return RValue::get(EmitNontemporalStore(*this, E)); |
| case Builtin::BI__c11_atomic_is_lock_free: |
| case Builtin::BI__atomic_is_lock_free: { |
| // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
| // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
| // _Atomic(T) is always properly-aligned. |
| const char *LibCallName = "__atomic_is_lock_free"; |
| CallArgList Args; |
| Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
| getContext().getSizeType()); |
| if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
| Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
| getContext().VoidPtrTy); |
| else |
| Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
| getContext().VoidPtrTy); |
| const CGFunctionInfo &FuncInfo = |
| CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
| llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
| llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
| return EmitCall(FuncInfo, CGCallee::forDirect(Func), |
| ReturnValueSlot(), Args); |
| } |
| |
| case Builtin::BI__atomic_test_and_set: { |
| // Look at the argument type to determine whether this is a volatile |
| // operation. The declared parameter type is always volatile, so we must |
| // look through implicit casts at the argument's own type. |
| QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
| bool Volatile = |
| PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
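| |
| // The core lowering (illustrative) is an exchange of the constant 1: |
| //   %old = atomicrmw xchg i8* %ptr, i8 1 <ordering> |
| //   %tobool = icmp ne i8 %old, 0 |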
| |
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
| Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
| Value *NewVal = Builder.getInt8(1); |
| Value *Order = EmitScalarExpr(E->getArg(1)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| AtomicRMWInst *Result = nullptr; |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Monotonic); |
| break; |
| case 1: // memory_order_consume |
| case 2: // memory_order_acquire |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Acquire); |
| break; |
| case 3: // memory_order_release |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Release); |
| break; |
| case 4: // memory_order_acq_rel |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::AcquireRelease); |
| break; |
| case 5: // memory_order_seq_cst |
| Result = Builder.CreateAtomicRMW( |
| llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| break; |
| } |
| Result->setVolatile(Volatile); |
| return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
| } |
| |
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| llvm::BasicBlock *BBs[5] = { |
| createBasicBlock("monotonic", CurFn), |
| createBasicBlock("acquire", CurFn), |
| createBasicBlock("release", CurFn), |
| createBasicBlock("acqrel", CurFn), |
| createBasicBlock("seqcst", CurFn) |
| }; |
| llvm::AtomicOrdering Orders[5] = { |
| llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
| llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
| llvm::AtomicOrdering::SequentiallyConsistent}; |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
| |
| Builder.SetInsertPoint(ContBB); |
| PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
| |
| for (unsigned i = 0; i < 5; ++i) { |
| Builder.SetInsertPoint(BBs[i]); |
| AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
| Ptr, NewVal, Orders[i]); |
| RMW->setVolatile(Volatile); |
| Result->addIncoming(RMW, BBs[i]); |
| Builder.CreateBr(ContBB); |
| } |
| |
| SI->addCase(Builder.getInt32(0), BBs[0]); |
| SI->addCase(Builder.getInt32(1), BBs[1]); |
| SI->addCase(Builder.getInt32(2), BBs[1]); |
| SI->addCase(Builder.getInt32(3), BBs[2]); |
| SI->addCase(Builder.getInt32(4), BBs[3]); |
| SI->addCase(Builder.getInt32(5), BBs[4]); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
| } |
| |
| case Builtin::BI__atomic_clear: { |
| QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
| bool Volatile = |
| PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
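| |
| // Clearing (illustrative) is an atomic store of zero with monotonic, |
| // release, or seq_cst ordering: |
| //   store atomic i8 0, i8* %ptr release |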
| |
| Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
| unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); |
| Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
| Value *NewVal = Builder.getInt8(0); |
| Value *Order = EmitScalarExpr(E->getArg(1)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
| break; |
| case 3: // memory_order_release |
| Store->setOrdering(llvm::AtomicOrdering::Release); |
| break; |
| case 5: // memory_order_seq_cst |
| Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
| break; |
| } |
| return RValue::get(nullptr); |
| } |
| |
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| llvm::BasicBlock *BBs[3] = { |
| createBasicBlock("monotonic", CurFn), |
| createBasicBlock("release", CurFn), |
| createBasicBlock("seqcst", CurFn) |
| }; |
| llvm::AtomicOrdering Orders[3] = { |
| llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
| llvm::AtomicOrdering::SequentiallyConsistent}; |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
| |
| for (unsigned i = 0; i < 3; ++i) { |
| Builder.SetInsertPoint(BBs[i]); |
| StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
| Store->setOrdering(Orders[i]); |
| Builder.CreateBr(ContBB); |
| } |
| |
| SI->addCase(Builder.getInt32(0), BBs[0]); |
| SI->addCase(Builder.getInt32(3), BBs[1]); |
| SI->addCase(Builder.getInt32(5), BBs[2]); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__atomic_thread_fence: |
| case Builtin::BI__atomic_signal_fence: |
| case Builtin::BI__c11_atomic_thread_fence: |
| case Builtin::BI__c11_atomic_signal_fence: { |
| llvm::SyncScope::ID SSID; |
| if (BuiltinID == Builtin::BI__atomic_signal_fence || |
| BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
| SSID = llvm::SyncScope::SingleThread; |
| else |
| SSID = llvm::SyncScope::System; |
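| |
| // E.g. (illustrative), __atomic_thread_fence(__ATOMIC_ACQUIRE) emits |
| //   fence acquire |
| // while the signal variants use the single-thread sync scope: |
| //   fence syncscope("singlethread") acquire |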
| Value *Order = EmitScalarExpr(E->getArg(0)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| break; |
| case 1: // memory_order_consume |
| case 2: // memory_order_acquire |
| Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
| break; |
| case 3: // memory_order_release |
| Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
| break; |
| case 4: // memory_order_acq_rel |
| Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
| break; |
| case 5: // memory_order_seq_cst |
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
| break; |
| } |
| return RValue::get(nullptr); |
| } |
| |
| llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
| AcquireBB = createBasicBlock("acquire", CurFn); |
| ReleaseBB = createBasicBlock("release", CurFn); |
| AcqRelBB = createBasicBlock("acqrel", CurFn); |
| SeqCstBB = createBasicBlock("seqcst", CurFn); |
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
| |
| Builder.SetInsertPoint(AcquireBB); |
| Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(1), AcquireBB); |
| SI->addCase(Builder.getInt32(2), AcquireBB); |
| |
| Builder.SetInsertPoint(ReleaseBB); |
| Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(3), ReleaseBB); |
| |
| Builder.SetInsertPoint(AcqRelBB); |
| Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(4), AcqRelBB); |
| |
| Builder.SetInsertPoint(SeqCstBB); |
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(5), SeqCstBB); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_signbit: |
| case Builtin::BI__builtin_signbitf: |
| case Builtin::BI__builtin_signbitl: { |
| return RValue::get( |
| Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
| ConvertType(E->getType()))); |
| } |
| case Builtin::BI__annotation: { |
| // Re-encode each wide string to UTF8 and make an MDString. |
| SmallVector<Metadata *, 1> Strings; |
| for (const Expr *Arg : E->arguments()) { |
| const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); |
| assert(Str->getCharByteWidth() == 2); |
| StringRef WideBytes = Str->getBytes(); |
| std::string StrUtf8; |
| if (!convertUTF16ToUTF8String( |
| makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { |
| CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); |
| continue; |
| } |
| Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); |
| } |
| |
| // Build an MDTuple of MDStrings and emit the intrinsic call. |
| llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); |
| MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); |
| Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); |
| return RValue::getIgnored(); |
| } |
| case Builtin::BI__builtin_annotation: { |
| llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
| AnnVal->getType()); |
| |
| // Get the annotation string, looking through casts. Sema requires this to |
| // be a non-wide string literal, potentially wrapped in casts, so the |
| // cast<> below is safe. |
| const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
| StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
| return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc())); |
| } |
| case Builtin::BI__builtin_addcb: |
| case Builtin::BI__builtin_addcs: |
| case Builtin::BI__builtin_addc: |
| case Builtin::BI__builtin_addcl: |
| case Builtin::BI__builtin_addcll: |
| case Builtin::BI__builtin_subcb: |
| case Builtin::BI__builtin_subcs: |
| case Builtin::BI__builtin_subc: |
| case Builtin::BI__builtin_subcl: |
| case Builtin::BI__builtin_subcll: { |
| |
| // We translate all of these builtins from expressions of the form: |
| // int x = ..., y = ..., carryin = ..., carryout, result; |
| // result = __builtin_addc(x, y, carryin, &carryout); |
| // |
| // to LLVM IR of the form: |
| // |
| // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
| // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
| // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
| // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
| // i32 %carryin) |
| // %result = extractvalue {i32, i1} %tmp2, 0 |
| // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
| // %tmp3 = or i1 %carry1, %carry2 |
| // %tmp4 = zext i1 %tmp3 to i32 |
| // store i32 %tmp4, i32* %carryout |
| |
| // Scalarize our inputs. |
| llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
| Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
| |
| // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
| llvm::Intrinsic::ID IntrinsicId; |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown multiprecision builtin id."); |
| case Builtin::BI__builtin_addcb: |
| case Builtin::BI__builtin_addcs: |
| case Builtin::BI__builtin_addc: |
| case Builtin::BI__builtin_addcl: |
| case Builtin::BI__builtin_addcll: |
| IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
| break; |
| case Builtin::BI__builtin_subcb: |
| case Builtin::BI__builtin_subcs: |
| case Builtin::BI__builtin_subc: |
| case Builtin::BI__builtin_subcl: |
| case Builtin::BI__builtin_subcll: |
| IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
| break; |
| } |
| |
| // Construct our resulting LLVM IR expression. |
| llvm::Value *Carry1; |
| llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
| X, Y, Carry1); |
| llvm::Value *Carry2; |
| llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
| Sum1, Carryin, Carry2); |
| llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
| X->getType()); |
|