| //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This contains code to emit Builtin calls as LLVM code. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "CodeGenFunction.h" |
| #include "CGCXXABI.h" |
| #include "CGObjCRuntime.h" |
| #include "CodeGenModule.h" |
| #include "TargetInfo.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/Decl.h" |
| #include "clang/Analysis/Analyses/OSLog.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/CodeGen/CGFunctionInfo.h" |
| #include "llvm/ADT/StringExtras.h" |
| #include "llvm/IR/CallSite.h" |
| #include "llvm/IR/DataLayout.h" |
| #include "llvm/IR/InlineAsm.h" |
| #include "llvm/IR/Intrinsics.h" |
| #include "llvm/IR/MDBuilder.h" |
| #include <sstream> |
| |
| using namespace clang; |
| using namespace CodeGen; |
| using namespace llvm; |
| |
| /// getBuiltinLibFunction - Given a builtin id for a function like |
| /// "__builtin_fabsf", return a Value* for "fabsf". |
| llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
| unsigned BuiltinID) { |
| assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
| |
| // Get the name; skip over the __builtin_ prefix (if necessary). |
| StringRef Name; |
| GlobalDecl D(FD); |
| |
| // If the builtin has been declared explicitly with an assembler label, |
| // use the mangled name. This differs from the plain label on platforms |
| // that prefix labels. |
| if (FD->hasAttr<AsmLabelAttr>()) |
| Name = getMangledName(D); |
| else |
| Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
| |
| llvm::FunctionType *Ty = |
| cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
| |
| return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
| } |
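| // Illustrative example: for a call to __builtin_fabs with no explicit asm |
| // label, this returns a declaration of the library function "fabs", since |
| // adding 10 to the builtin name skips the 10-character "__builtin_" prefix. |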
| |
| /// Emit the conversions required to turn the given value into an |
| /// integer of the given size. |
| static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::IntegerType *IntType) { |
| V = CGF.EmitToMemory(V, T); |
| |
| if (V->getType()->isPointerTy()) |
| return CGF.Builder.CreatePtrToInt(V, IntType); |
| |
| assert(V->getType() == IntType); |
| return V; |
| } |
| |
| static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::Type *ResultType) { |
| V = CGF.EmitFromMemory(V, T); |
| |
| if (ResultType->isPointerTy()) |
| return CGF.Builder.CreateIntToPtr(V, ResultType); |
| |
| assert(V->getType() == ResultType); |
| return V; |
| } |
| |
| /// Utility to insert an atomic instruction based on the AtomicRMWInst::BinOp |
| /// kind and the expression node. |
| static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
| return EmitFromInt(CGF, Result, T, ValueType); |
| } |
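| // Roughly (illustrative, not exact IR): __sync_fetch_and_add(p, 1) with an |
| // int *p becomes |
| //   %old = atomicrmw add i32* %p, i32 1 seq_cst |
| // with pointer-typed values additionally round-tripped through ptrtoint / |
| // inttoptr so the RMW always operates on an integer of the right width. |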
| |
| static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
| Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| // Convert the type of the pointer to a pointer to the stored type. |
| Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
| Value *BC = CGF.Builder.CreateBitCast( |
| Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
| LV.setNontemporal(true); |
| CGF.EmitStoreOfScalar(Val, LV, false); |
| return nullptr; |
| } |
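| // Illustrative: __builtin_nontemporal_store(v, p) for int v produces roughly |
| //   store i32 %v, i32* %p, align 4, !nontemporal !0   ; !0 = !{i32 1} |
| // which backends may lower to streaming (cache-bypassing) stores. |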
| |
| static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
| LV.setNontemporal(true); |
| return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
| } |
| |
| static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E) { |
| return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
| } |
| |
| /// Utility to insert an atomic instruction based on the AtomicRMWInst::BinOp |
| /// kind and the expression node, where the return value is the result of the |
| /// operation rather than the old value. |
| static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E, |
| Instruction::BinaryOps Op, |
| bool Invert = false) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
| Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
| if (Invert) |
| Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
| llvm::ConstantInt::get(IntType, -1)); |
| Result = EmitFromInt(CGF, Result, T, ValueType); |
| return RValue::get(Result); |
| } |
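| // Roughly (illustrative): __sync_add_and_fetch(p, v) becomes |
| //   %old = atomicrmw add i32* %p, i32 %v seq_cst |
| //   %new = add i32 %old, %v |
| // i.e. the operation is redone on the old value to produce the result. For |
| // the nand flavor, Invert is set and the redone 'and' is xor'ed with -1 to |
| // recover ~(old & v). |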
| |
| /// @brief Utility to insert an atomic cmpxchg instruction. |
| /// |
| /// @param CGF The current codegen function. |
| /// @param E Builtin call expression to convert to cmpxchg. |
| /// arg0 - address to operate on |
| /// arg1 - value to compare with |
| /// arg2 - new value |
| /// @param ReturnBool Specifies whether to return success flag of |
| /// cmpxchg result or the old value. |
| /// |
| /// @returns result of cmpxchg, according to ReturnBool |
| static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
| bool ReturnBool) { |
| QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = llvm::IntegerType::get( |
| CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| Value *Args[3]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
| |
| Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
| Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| if (ReturnBool) |
| // Extract boolean success flag and zext it to int. |
| return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
| CGF.ConvertType(E->getType())); |
| else |
| // Extract old value and emit it using the same type as compare value. |
| return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
| ValueType); |
| } |
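| // Roughly (illustrative): __sync_val_compare_and_swap(p, old, new) becomes |
| //   %pair = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst |
| //   %val  = extractvalue { i32, i1 } %pair, 0 |
| // while the bool variant extracts field 1 and zero-extends it instead. |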
| |
| // Emit a simple mangled intrinsic that has 1 argument and a return type |
| // matching the argument type. |
| static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, Src0); |
| } |
| |
| // Emit an intrinsic that has 2 operands of the same type as its result. |
| static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
| } |
| |
| // Emit an intrinsic that has 3 operands of the same type as its result. |
| static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
| } |
| |
| // Emit an intrinsic that has 1 float or double operand and 1 integer operand. |
| static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, {Src0, Src1}); |
| } |
| |
| /// EmitFAbs - Emit a call to @llvm.fabs(). |
| static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
| Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
| llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
| Call->setDoesNotAccessMemory(); |
| return Call; |
| } |
| |
| /// Emit the computation of the sign bit for a floating point value. Returns |
| /// the i1 sign bit value. |
| static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
| LLVMContext &C = CGF.CGM.getLLVMContext(); |
| |
| llvm::Type *Ty = V->getType(); |
| int Width = Ty->getPrimitiveSizeInBits(); |
| llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateBitCast(V, IntTy); |
| if (Ty->isPPC_FP128Ty()) { |
| // We want the sign bit of the higher-order double. The bitcast we just |
| // did works as if the double-double was stored to memory and then |
| // read as an i128. The "store" will put the higher-order double in the |
| // lower address in both little- and big-endian modes, but the "load" |
| // will treat those bits as a different part of the i128: the low bits in |
| // little-endian, the high bits in big-endian. Therefore, on big-endian |
| // targets we need to shift the high bits down to the low bits before truncating. |
| Width >>= 1; |
| if (CGF.getTarget().isBigEndian()) { |
| Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
| V = CGF.Builder.CreateLShr(V, ShiftCst); |
| } |
| // We are truncating the value in order to extract the higher-order double, |
| // from which we will extract the sign. |
| IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateTrunc(V, IntTy); |
| } |
| Value *Zero = llvm::Constant::getNullValue(IntTy); |
| return CGF.Builder.CreateICmpSLT(V, Zero); |
| } |
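| // Illustrative: for a plain double this is roughly |
| //   %bits = bitcast double %v to i64 |
| //   %sign = icmp slt i64 %bits, 0 |
| // i.e. the sign bit is just the integer sign of the bit pattern; ppc_fp128 |
| // is the only case needing the shift/truncate dance above. |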
| |
| static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn, |
| const CallExpr *E, llvm::Value *calleeValue) { |
| return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E, |
| ReturnValueSlot(), Fn); |
| } |
| |
| /// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
| /// depending on IntrinsicID. |
| /// |
| /// \arg CGF The current codegen function. |
| /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
| /// \arg X The first argument to the llvm.*.with.overflow.*. |
| /// \arg Y The second argument to the llvm.*.with.overflow.*. |
| /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
| /// \returns The result (i.e. sum/product) returned by the intrinsic. |
| static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
| const llvm::Intrinsic::ID IntrinsicID, |
| llvm::Value *X, llvm::Value *Y, |
| llvm::Value *&Carry) { |
| // Make sure we have integers of the same width. |
| assert(X->getType() == Y->getType() && |
| "Arguments must be the same type. (Did you forget to make sure both " |
| "arguments have the same integer width?)"); |
| |
| llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
| llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
| Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
| return CGF.Builder.CreateExtractValue(Tmp, 0); |
| } |
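| // Illustrative: with llvm.sadd.with.overflow on i32 operands this emits |
| //   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y) |
| //   %sum   = extractvalue { i32, i1 } %pair, 0 |
| //   %carry = extractvalue { i32, i1 } %pair, 1 |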
| |
| static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
| unsigned IntrinsicID, |
| int low, int high) { |
| llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
| llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
| Value *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
| llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
| Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
| return Call; |
| } |
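| // Illustrative: a call emitted with low=0, high=1024 carries range metadata |
| //   %r = call i32 @llvm.some.intrinsic(), !range !0 |
| //   !0 = !{i32 0, i32 1024} |
| // (intrinsic name hypothetical) so the optimizer may assume 0 <= %r < 1024. |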
| |
| namespace { |
| struct WidthAndSignedness { |
| unsigned Width; |
| bool Signed; |
| }; |
| } |
| |
| static WidthAndSignedness |
| getIntegerWidthAndSignedness(const clang::ASTContext &context, |
| const clang::QualType Type) { |
| assert(Type->isIntegerType() && "Given type is not an integer."); |
| unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width; |
| bool Signed = Type->isSignedIntegerType(); |
| return {Width, Signed}; |
| } |
| |
| // Given one or more integer types, this function produces an integer type that |
| // encompasses them: any value in one of the given types could be expressed in |
| // the encompassing type. |
| static struct WidthAndSignedness |
| EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
| assert(Types.size() > 0 && "Empty list of types."); |
| |
| // If any of the given types is signed, we must return a signed type. |
| bool Signed = false; |
| for (const auto &Type : Types) { |
| Signed |= Type.Signed; |
| } |
| |
| // The encompassing type must have a width greater than or equal to the width |
| // of the specified types. Additionally, if the encompassing type is signed, |
| // its width must be strictly greater than the width of any unsigned types |
| // given. |
| unsigned Width = 0; |
| for (const auto &Type : Types) { |
| unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
| if (Width < MinWidth) { |
| Width = MinWidth; |
| } |
| } |
| |
| return {Width, Signed}; |
| } |
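| // Worked example: encompassing {signed 32-bit, unsigned 32-bit} must be |
| // signed (one input is signed) and at least 33 bits wide, since a signed |
| // type needs one extra bit to represent every unsigned 32-bit value. |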
| |
| Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
| llvm::Type *DestType = Int8PtrTy; |
| if (ArgValue->getType() != DestType) |
| ArgValue = |
| Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
| |
| Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
| return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
| } |
| |
| /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
| /// __builtin_object_size(p, @p To) is correct. |
| static bool areBOSTypesCompatible(int From, int To) { |
| // Note: Our __builtin_object_size implementation currently treats Type=0 and |
| // Type=2 identically. Encoding this implementation detail here may make |
| // improving __builtin_object_size difficult in the future, so it's omitted. |
| return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
| } |
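| // E.g. a pass_object_size(0) result may stand in for a Type=1 query: the |
| // whole-object maximum is still a valid (if looser) upper bound for the |
| // enclosing-subobject maximum; likewise Type=3 can stand in for Type=2 on |
| // the minimum side. |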
| |
| static llvm::Value * |
| getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
| return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
| } |
| |
| llvm::Value * |
| CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType) { |
| uint64_t ObjectSize; |
| if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
| return emitBuiltinObjectSize(E, Type, ResType); |
| return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
| } |
| |
| /// Returns a Value corresponding to the size of the given expression. |
| /// This Value may be either of the following: |
| /// - An llvm::Argument (if E is a param with the pass_object_size attribute on |
| /// it) |
| /// - A call to the @llvm.objectsize intrinsic |
| llvm::Value * |
| CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType) { |
| // We need to reference an argument if the pointer is a parameter with the |
| // pass_object_size attribute. |
| if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
| auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
| auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
| if (Param != nullptr && PS != nullptr && |
| areBOSTypesCompatible(PS->getType(), Type)) { |
| auto Iter = SizeArguments.find(Param); |
| assert(Iter != SizeArguments.end()); |
| |
| const ImplicitParamDecl *D = Iter->second; |
| auto DIter = LocalDeclMap.find(D); |
| assert(DIter != LocalDeclMap.end()); |
| |
| return EmitLoadOfScalar(DIter->second, /*volatile=*/false, |
| getContext().getSizeType(), E->getLocStart()); |
| } |
| } |
| |
| // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
| // evaluate E for side-effects. In either case, we shouldn't lower to |
| // @llvm.objectsize. |
| if (Type == 3 || E->HasSideEffects(getContext())) |
| return getDefaultBuiltinObjectSizeResult(Type, ResType); |
| |
| // LLVM only distinguishes between types 0 and 2 here, so pass that along |
| // as the boolean 'min' argument. |
| auto *CI = ConstantInt::get(Builder.getInt1Ty(), (Type & 2) >> 1); |
| // FIXME: Get right address space. |
| llvm::Type *Tys[] = {ResType, Builder.getInt8PtrTy(0)}; |
| Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys); |
| return Builder.CreateCall(F, {EmitScalarExpr(E), CI}); |
| } |
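| // Illustrative: __builtin_object_size(p, 2) lowers to roughly |
| //   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true) |
| // where the i1 'min' flag is (Type & 2) >> 1. |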
| |
| namespace { |
| struct CallObjCArcUse final : EHScopeStack::Cleanup { |
| CallObjCArcUse(llvm::Value *object) : object(object) {} |
| llvm::Value *object; |
| |
| void Emit(CodeGenFunction &CGF, Flags flags) override { |
| CGF.EmitARCIntrinsicUse(object); |
| } |
| }; |
| } |
| |
| RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, |
| unsigned BuiltinID, const CallExpr *E, |
| ReturnValueSlot ReturnValue) { |
| // See if we can constant fold this builtin. If so, don't emit it at all. |
| Expr::EvalResult Result; |
| if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
| !Result.hasSideEffects()) { |
| if (Result.Val.isInt()) |
| return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
| Result.Val.getInt())); |
| if (Result.Val.isFloat()) |
| return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
| Result.Val.getFloat())); |
| } |
| |
| switch (BuiltinID) { |
| default: break; // Handle intrinsics and libm functions below. |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| case Builtin::BI__builtin___NSStringMakeConstantString: |
| return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr)); |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| case Builtin::BI__va_start: |
| case Builtin::BI__builtin_va_end: |
| return RValue::get( |
| EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
| ? EmitScalarExpr(E->getArg(0)) |
| : EmitVAListRef(E->getArg(0)).getPointer(), |
| BuiltinID != Builtin::BI__builtin_va_end)); |
| case Builtin::BI__builtin_va_copy: { |
| Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
| Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
| |
| llvm::Type *Type = Int8PtrTy; |
| |
| DstPtr = Builder.CreateBitCast(DstPtr, Type); |
| SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
| return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
| {DstPtr, SrcPtr})); |
| } |
| case Builtin::BI__builtin_abs: |
| case Builtin::BI__builtin_labs: |
| case Builtin::BI__builtin_llabs: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| Value *NegOp = Builder.CreateNeg(ArgValue, "neg"); |
| Value *CmpResult = |
| Builder.CreateICmpSGE(ArgValue, |
| llvm::Constant::getNullValue(ArgValue->getType()), |
| "abscond"); |
| Value *Result = |
| Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs"); |
| |
| return RValue::get(Result); |
| } |
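| // Illustrative: abs is emitted branchlessly, roughly |
| //   %neg = sub i32 0, %x |
| //   %abscond = icmp sge i32 %x, 0 |
| //   %abs = select i1 %abscond, i32 %x, i32 %neg |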
| case Builtin::BI__builtin_fabs: |
| case Builtin::BI__builtin_fabsf: |
| case Builtin::BI__builtin_fabsl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
| } |
| case Builtin::BI__builtin_fmod: |
| case Builtin::BI__builtin_fmodf: |
| case Builtin::BI__builtin_fmodl: { |
| Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
| Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
| Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_copysign: |
| case Builtin::BI__builtin_copysignf: |
| case Builtin::BI__builtin_copysignl: { |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
| } |
| case Builtin::BI__builtin_ceil: |
| case Builtin::BI__builtin_ceilf: |
| case Builtin::BI__builtin_ceill: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil)); |
| } |
| case Builtin::BI__builtin_floor: |
| case Builtin::BI__builtin_floorf: |
| case Builtin::BI__builtin_floorl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor)); |
| } |
| case Builtin::BI__builtin_trunc: |
| case Builtin::BI__builtin_truncf: |
| case Builtin::BI__builtin_truncl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc)); |
| } |
| case Builtin::BI__builtin_rint: |
| case Builtin::BI__builtin_rintf: |
| case Builtin::BI__builtin_rintl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint)); |
| } |
| case Builtin::BI__builtin_nearbyint: |
| case Builtin::BI__builtin_nearbyintf: |
| case Builtin::BI__builtin_nearbyintl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint)); |
| } |
| case Builtin::BI__builtin_round: |
| case Builtin::BI__builtin_roundf: |
| case Builtin::BI__builtin_roundl: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round)); |
| } |
| case Builtin::BI__builtin_fmin: |
| case Builtin::BI__builtin_fminf: |
| case Builtin::BI__builtin_fminl: { |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum)); |
| } |
| case Builtin::BI__builtin_fmax: |
| case Builtin::BI__builtin_fmaxf: |
| case Builtin::BI__builtin_fmaxl: { |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum)); |
| } |
| case Builtin::BI__builtin_conj: |
| case Builtin::BI__builtin_conjf: |
| case Builtin::BI__builtin_conjl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| Value *Real = ComplexVal.first; |
| Value *Imag = ComplexVal.second; |
| Value *Zero = |
| Imag->getType()->isFPOrFPVectorTy() |
| ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) |
| : llvm::Constant::getNullValue(Imag->getType()); |
| |
| Imag = Builder.CreateFSub(Zero, Imag, "sub"); |
| return RValue::getComplex(std::make_pair(Real, Imag)); |
| } |
| case Builtin::BI__builtin_creal: |
| case Builtin::BI__builtin_crealf: |
| case Builtin::BI__builtin_creall: |
| case Builtin::BIcreal: |
| case Builtin::BIcrealf: |
| case Builtin::BIcreall: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.first); |
| } |
| |
| case Builtin::BI__builtin_cimag: |
| case Builtin::BI__builtin_cimagf: |
| case Builtin::BI__builtin_cimagl: |
| case Builtin::BIcimag: |
| case Builtin::BIcimagf: |
| case Builtin::BIcimagl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.second); |
| } |
| |
| case Builtin::BI__builtin_ctzs: |
| case Builtin::BI__builtin_ctz: |
| case Builtin::BI__builtin_ctzl: |
| case Builtin::BI__builtin_ctzll: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_clzs: |
| case Builtin::BI__builtin_clz: |
| case Builtin::BI__builtin_clzl: |
| case Builtin::BI__builtin_clzll: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_ffs: |
| case Builtin::BI__builtin_ffsl: |
| case Builtin::BI__builtin_ffsll: { |
| // ffs(x) -> x ? cttz(x) + 1 : 0 |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = |
| Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
| llvm::ConstantInt::get(ArgType, 1)); |
| Value *Zero = llvm::Constant::getNullValue(ArgType); |
| Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
| Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_parity: |
| case Builtin::BI__builtin_parityl: |
| case Builtin::BI__builtin_parityll: { |
| // parity(x) -> ctpop(x) & 1 |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = Builder.CreateCall(F, ArgValue); |
| Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_popcount: |
| case Builtin::BI__builtin_popcountl: |
| case Builtin::BI__builtin_popcountll: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Result = Builder.CreateCall(F, ArgValue); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_unpredictable: { |
| // Always return the argument of __builtin_unpredictable. LLVM does not |
| // handle this builtin. Metadata for this builtin should be added directly |
| // to instructions such as branches or switches that use it. |
| return RValue::get(EmitScalarExpr(E->getArg(0))); |
| } |
| case Builtin::BI__builtin_expect: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = ArgValue->getType(); |
| |
| Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
| // Don't generate llvm.expect on -O0 as the backend won't use it for |
| // anything. |
| // Note that we still IRGen ExpectedValue because it could have side-effects. |
| if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
| return RValue::get(ArgValue); |
| |
| Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
| Value *Result = |
| Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
| return RValue::get(Result); |
| } |
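| // Illustrative: when optimizing, __builtin_expect(x, 1) on a long becomes |
| //   %expval = call i64 @llvm.expect.i64(i64 %x, i64 1) |
| // which later passes convert into branch-probability metadata. |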
| case Builtin::BI__builtin_assume_aligned: { |
| Value *PtrValue = EmitScalarExpr(E->getArg(0)); |
| Value *OffsetValue = |
| (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
| |
| Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
| ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
| unsigned Alignment = (unsigned) AlignmentCI->getZExtValue(); |
| |
| EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue); |
| return RValue::get(PtrValue); |
| } |
| case Builtin::BI__assume: |
| case Builtin::BI__builtin_assume: { |
| if (E->getArg(0)->HasSideEffects(getContext())) |
| return RValue::get(nullptr); |
| |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
| return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
| } |
| case Builtin::BI__builtin_bswap16: |
| case Builtin::BI__builtin_bswap32: |
| case Builtin::BI__builtin_bswap64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
| } |
| case Builtin::BI__builtin_bitreverse8: |
| case Builtin::BI__builtin_bitreverse16: |
| case Builtin::BI__builtin_bitreverse32: |
| case Builtin::BI__builtin_bitreverse64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
| } |
| case Builtin::BI__builtin_object_size: { |
| unsigned Type = |
| E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
| auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
| |
| // We pass this builtin onto the optimizer so that it can figure out the |
| // object size in more complex cases. |
| return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType)); |
| } |
| case Builtin::BI__builtin_prefetch: { |
| Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
| // FIXME: Technically these constants should be of type 'int', yes? |
| RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
| llvm::ConstantInt::get(Int32Ty, 0); |
| Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
| llvm::ConstantInt::get(Int32Ty, 3); |
| Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
| Value *F = CGM.getIntrinsic(Intrinsic::prefetch); |
| return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
| } |
| case Builtin::BI__builtin_readcyclecounter: { |
| Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
| return RValue::get(Builder.CreateCall(F)); |
| } |
| case Builtin::BI__builtin___clear_cache: { |
| Value *Begin = EmitScalarExpr(E->getArg(0)); |
| Value *End = EmitScalarExpr(E->getArg(1)); |
| Value *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
| return RValue::get(Builder.CreateCall(F, {Begin, End})); |
| } |
| case Builtin::BI__builtin_trap: |
| return RValue::get(EmitTrapCall(Intrinsic::trap)); |
| case Builtin::BI__debugbreak: |
| return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
| case Builtin::BI__builtin_unreachable: { |
| if (SanOpts.has(SanitizerKind::Unreachable)) { |
| SanitizerScope SanScope(this); |
| EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()), |
| SanitizerKind::Unreachable), |
| "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()), |
| None); |
| } else |
| Builder.CreateUnreachable(); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("unreachable.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_powi: |
| case Builtin::BI__builtin_powif: |
| case Builtin::BI__builtin_powil: { |
| Value *Base = EmitScalarExpr(E->getArg(0)); |
| Value *Exponent = EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ArgType = Base->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); |
| return RValue::get(Builder.CreateCall(F, {Base, Exponent})); |
| } |
| |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: { |
| // Ordered comparisons: we know the arguments to these are matching scalar |
| // floating point values. |
| Value *LHS = EmitScalarExpr(E->getArg(0)); |
| Value *RHS = EmitScalarExpr(E->getArg(1)); |
| |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown ordered comparison"); |
| case Builtin::BI__builtin_isgreater: |
| LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isgreaterequal: |
| LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isless: |
| LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessequal: |
| LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessgreater: |
| LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isunordered: |
| LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
| break; |
| } |
| // ZExt bool to int type. |
| return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_isnan: { |
| Value *V = EmitScalarExpr(E->getArg(0)); |
| V = Builder.CreateFCmpUNO(V, V, "cmp"); |
| return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_isinf: |
| case Builtin::BI__builtin_isfinite: { |
| // isinf(x) --> fabs(x) == infinity |
| // isfinite(x) --> fabs(x) != infinity |
| // x != NaN via the ordered compare in either case. |
| Value *V = EmitScalarExpr(E->getArg(0)); |
| Value *Fabs = EmitFAbs(*this, V); |
| Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
| CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
| ? CmpInst::FCMP_OEQ |
| : CmpInst::FCMP_ONE; |
| Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
| return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_isinf_sign: { |
| // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
| Value *Arg = EmitScalarExpr(E->getArg(0)); |
| Value *AbsArg = EmitFAbs(*this, Arg); |
| Value *IsInf = Builder.CreateFCmpOEQ( |
| AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
| Value *IsNeg = EmitSignBit(*this, Arg); |
| |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| Value *Zero = Constant::getNullValue(IntTy); |
| Value *One = ConstantInt::get(IntTy, 1); |
| Value *NegativeOne = ConstantInt::get(IntTy, -1); |
| Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
| Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
| return RValue::get(Result); |
| } |
| |
| case Builtin::BI__builtin_isnormal: { |
| // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min |
| Value *V = EmitScalarExpr(E->getArg(0)); |
| Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
| |
| Value *Abs = EmitFAbs(*this, V); |
| Value *IsLessThanInf = |
| Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); |
| APFloat Smallest = APFloat::getSmallestNormalized( |
| getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
| Value *IsNormal = |
| Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
| "isnormal"); |
| V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
| V = Builder.CreateAnd(V, IsNormal, "and"); |
| return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_fpclassify: { |
| Value *V = EmitScalarExpr(E->getArg(5)); |
| llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
| |
| // Create Result |
| BasicBlock *Begin = Builder.GetInsertBlock(); |
| BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
| Builder.SetInsertPoint(End); |
| PHINode *Result = |
| Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
| "fpclassify_result"); |
| |
| // if (V==0) return FP_ZERO |
| Builder.SetInsertPoint(Begin); |
| Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
| "iszero"); |
| Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
| BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
| Builder.CreateCondBr(IsZero, End, NotZero); |
| Result->addIncoming(ZeroLiteral, Begin); |
| |
| // if (V != V) return FP_NAN |
| Builder.SetInsertPoint(NotZero); |
| Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
| Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
| BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
| Builder.CreateCondBr(IsNan, End, NotNan); |
| Result->addIncoming(NanLiteral, NotZero); |
| |
| // if (fabs(V) == infinity) return FP_INFINITY |
| Builder.SetInsertPoint(NotNan); |
| Value *VAbs = EmitFAbs(*this, V); |
| Value *IsInf = |
| Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
| "isinf"); |
| Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
| BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
| Builder.CreateCondBr(IsInf, End, NotInf); |
| Result->addIncoming(InfLiteral, NotNan); |
| |
| // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
| Builder.SetInsertPoint(NotInf); |
| APFloat Smallest = APFloat::getSmallestNormalized( |
| getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
| Value *IsNormal = |
| Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
| "isnormal"); |
| Value *NormalResult = |
| Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
| EmitScalarExpr(E->getArg(3))); |
| Builder.CreateBr(End); |
| Result->addIncoming(NormalResult, NotInf); |
| |
| // return Result |
| Builder.SetInsertPoint(End); |
| return RValue::get(Result); |
| } |
| |
| case Builtin::BIalloca: |
| case Builtin::BI_alloca: |
| case Builtin::BI__builtin_alloca: { |
| Value *Size = EmitScalarExpr(E->getArg(0)); |
| return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size)); |
| } |
| case Builtin::BIbzero: |
| case Builtin::BI__builtin_bzero: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BImemcpy: |
| case Builtin::BI__builtin_memcpy: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
| E->getArg(1)->getExprLoc(), FD, 1); |
| Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| |
| case Builtin::BI__builtin___memcpy_chk: { |
| // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
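| // Illustrative: __builtin___memcpy_chk(d, s, 16, 32) folds to a plain |
| // 16-byte memcpy since the copy size cannot exceed the destination size; |
| // otherwise we break out and emit the ordinary checked library call. |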
| |
| case Builtin::BI__builtin_objc_memmove_collectable: { |
| Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
| Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
| DestAddr, SrcAddr, SizeVal); |
| return RValue::get(DestAddr.getPointer()); |
| } |
| |
| case Builtin::BI__builtin___memmove_chk: { |
| // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. |
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemMove(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| |
| case Builtin::BImemmove: |
| case Builtin::BI__builtin_memmove: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Address Src = EmitPointerWithAlignment(E->getArg(1)); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
| E->getArg(1)->getExprLoc(), FD, 1); |
| Builder.CreateMemMove(Dest, Src, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BImemset: |
| case Builtin::BI__builtin_memset: { |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
| Builder.getInt8Ty()); |
| Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
| EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
| E->getArg(0)->getExprLoc(), FD, 0); |
| Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BI__builtin___memset_chk: { |
| // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. |
| llvm::APSInt Size, DstSize; |
| if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || |
| !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) |
| break; |
| if (Size.ugt(DstSize)) |
| break; |
| Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
| Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
| Builder.getInt8Ty()); |
| Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
| Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
| return RValue::get(Dest.getPointer()); |
| } |
| case Builtin::BI__builtin_dwarf_cfa: { |
| // The offset in bytes from the first argument to the CFA. |
| // |
| // Why on earth is this in the frontend? Is there any reason at |
| // all that the backend can't reasonably determine this while |
| // lowering llvm.eh.dwarf.cfa()? |
| // |
| // TODO: If there's a satisfactory reason, add a target hook for |
| // this instead of hard-coding 0, which is correct for most targets. |
| int32_t Offset = 0; |
| |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
| return RValue::get(Builder.CreateCall(F, |
| llvm::ConstantInt::get(Int32Ty, Offset))); |
| } |
| case Builtin::BI__builtin_return_address: { |
| Value *Depth = |
| CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this); |
| Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
| return RValue::get(Builder.CreateCall(F, Depth)); |
| } |
| case Builtin::BI__builtin_frame_address: { |
| Value *Depth = |
| CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this); |
| Value *F = CGM.getIntrinsic(Intrinsic::frameaddress); |
| return RValue::get(Builder.CreateCall(F, Depth)); |
| } |
| case Builtin::BI__builtin_extract_return_addr: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_frob_return_addr: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_dwarf_sp_column: { |
| llvm::IntegerType *Ty |
| = cast<llvm::IntegerType>(ConvertType(E->getType())); |
| int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
| if (Column == -1) { |
| CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
| return RValue::get(llvm::UndefValue::get(Ty)); |
| } |
| return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
| } |
| case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
| Value *Address = EmitScalarExpr(E->getArg(0)); |
| if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
| CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
| return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_eh_return: { |
| Value *Int = EmitScalarExpr(E->getArg(0)); |
| Value *Ptr = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
| assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && |
| "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); |
| Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32 |
| ? Intrinsic::eh_return_i32 |
| : Intrinsic::eh_return_i64); |
| Builder.CreateCall(F, {Int, Ptr}); |
| Builder.CreateUnreachable(); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| case Builtin::BI__builtin_unwind_init: { |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
| return RValue::get(Builder.CreateCall(F)); |
| } |
| case Builtin::BI__builtin_extend_pointer: { |
| // Extends a pointer to the size of an _Unwind_Word, which is |
| // uint64_t on all platforms. Generally this gets poked into a |
| // register and eventually used as an address, so if the |
| // addressing registers are wider than pointers and the platform |
| // doesn't implicitly ignore high-order bits when doing |
| // addressing, we need to make sure we zext / sext based on |
| // the platform's expectations. |
| // |
| // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
| |
| // Cast the pointer to intptr_t. |
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
| |
| // If that's 64 bits, we're done. |
| if (IntPtrTy->getBitWidth() == 64) |
| return RValue::get(Result); |
| |
| // Otherwise, ask the codegen data what to do. |
| if (getTargetHooks().extendPointerWithSExt()) |
| return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
| else |
| return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
| } |
| case Builtin::BI__builtin_setjmp: { |
| // Buffer is a void**. |
| Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
| |
| // Store the frame pointer to the setjmp buffer. |
| Value *FrameAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), |
| ConstantInt::get(Int32Ty, 0)); |
| Builder.CreateStore(FrameAddr, Buf); |
| |
| // Store the stack pointer to the setjmp buffer. |
| Value *StackAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
| Address StackSaveSlot = |
| Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize()); |
| Builder.CreateStore(StackAddr, StackSaveSlot); |
| |
| // Call LLVM's EH setjmp, which is lightweight. |
| Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
| Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
| return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
| } |
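| // Illustrative buffer layout assumed by llvm.eh.sjlj.setjmp (pointer-sized |
| // slots): buf[0] = frame address, buf[1] = resume address (filled in by the |
| // intrinsic's lowering), buf[2] = stack pointer. |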
| case Builtin::BI__builtin_longjmp: { |
| Value *Buf = EmitScalarExpr(E->getArg(0)); |
| Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
| |
| // Call LLVM's EH longjmp, which is lightweight. |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
| |
| // longjmp doesn't return; mark this as unreachable. |
| Builder.CreateUnreachable(); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("longjmp.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_nand: |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_nand_and_fetch: |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_swap: |
| llvm_unreachable("Shouldn't make it through sema"); |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
| case Builtin::BI__sync_fetch_and_nand_1: |
| case Builtin::BI__sync_fetch_and_nand_2: |
| case Builtin::BI__sync_fetch_and_nand_4: |
| case Builtin::BI__sync_fetch_and_nand_8: |
| case Builtin::BI__sync_fetch_and_nand_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
| |
| // Clang extensions: not overloaded yet. |
| case Builtin::BI__sync_fetch_and_min: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
| case Builtin::BI__sync_fetch_and_max: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
| case Builtin::BI__sync_fetch_and_umin: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
| case Builtin::BI__sync_fetch_and_umax: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
| |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
| llvm::Instruction::Add); |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
| llvm::Instruction::Sub); |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
| llvm::Instruction::And); |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
| llvm::Instruction::Or); |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
| llvm::Instruction::Xor); |
| case Builtin::BI__sync_nand_and_fetch_1: |
| case Builtin::BI__sync_nand_and_fetch_2: |
| case Builtin::BI__sync_nand_and_fetch_4: |
| case Builtin::BI__sync_nand_and_fetch_8: |
| case Builtin::BI__sync_nand_and_fetch_16: |
| return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
| llvm::Instruction::And, true); |
| |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
| |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
| |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
| |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
| |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: { |
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
| CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
| llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
| StoreSize.getQuantity() * 8); |
| Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
| llvm::StoreInst *Store = |
| Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
| StoreSize); |
| Store->setAtomic(llvm::AtomicOrdering::Release); |
| return RValue::get(nullptr); |
| } |
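| // Illustrative: __sync_lock_release(p) for int *p becomes roughly |
| //   store atomic i32 0, i32* %p release, align 4 |
| // i.e. a release-ordered store of zero rather than an atomicrmw. |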
| |
| case Builtin::BI__sync_synchronize: { |
| // We assume this is supposed to correspond to a C++0x-style |
| // sequentially-consistent fence (i.e. this is only usable for |
| // synchronization, not device I/O or anything like that). This intrinsic |
| // is really badly designed in the sense that in theory, there isn't |
| // any way to safely use it... but in practice, it mostly works |
| // to use it with non-atomic loads and stores to get acquire/release |
| // semantics. |
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_nontemporal_load: |
| return RValue::get(EmitNontemporalLoad(*this, E)); |
| case Builtin::BI__builtin_nontemporal_store: |
| return RValue::get(EmitNontemporalStore(*this, E)); |
| case Builtin::BI__c11_atomic_is_lock_free: |
| case Builtin::BI__atomic_is_lock_free: { |
| // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
| // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
| // _Atomic(T) is always properly-aligned. |
| const char *LibCallName = "__atomic_is_lock_free"; |
| CallArgList Args; |
| Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
| getContext().getSizeType()); |
| if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
| Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
| getContext().VoidPtrTy); |
| else |
| Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
| getContext().VoidPtrTy); |
| const CGFunctionInfo &FuncInfo = |
| CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
| llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
| llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
| return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args); |
| } |
| |
| case Builtin::BI__atomic_test_and_set: { |
// Look at the argument type (before implicit casts) to determine whether
// this is a volatile operation; the declared parameter type is always
// volatile-qualified, so it cannot be used for this check.
| QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
| bool Volatile = |
| PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
| |
| Value *Ptr = EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
| Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
| Value *NewVal = Builder.getInt8(1); |
| Value *Order = EmitScalarExpr(E->getArg(1)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| AtomicRMWInst *Result = nullptr; |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Monotonic); |
| break; |
| case 1: // memory_order_consume |
| case 2: // memory_order_acquire |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Acquire); |
| break; |
| case 3: // memory_order_release |
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::Release); |
| break; |
case 4: // memory_order_acq_rel
| Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::AcquireRelease); |
| break; |
| case 5: // memory_order_seq_cst |
| Result = Builder.CreateAtomicRMW( |
| llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| break; |
| } |
| Result->setVolatile(Volatile); |
| return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
| } |
| |
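// The ordering is not a compile-time constant, so emit a switch over the
// memory_order values: each destination block performs the exchange with the
// corresponding LLVM ordering and feeds its result into a common PHI,
// roughly:
//
//   switch i32 %order, label %monotonic [ i32 1, label %acquire
//                                         i32 2, label %acquire ... ]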
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| llvm::BasicBlock *BBs[5] = { |
| createBasicBlock("monotonic", CurFn), |
| createBasicBlock("acquire", CurFn), |
| createBasicBlock("release", CurFn), |
| createBasicBlock("acqrel", CurFn), |
| createBasicBlock("seqcst", CurFn) |
| }; |
| llvm::AtomicOrdering Orders[5] = { |
| llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
| llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
| llvm::AtomicOrdering::SequentiallyConsistent}; |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
| |
| Builder.SetInsertPoint(ContBB); |
| PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
| |
| for (unsigned i = 0; i < 5; ++i) { |
| Builder.SetInsertPoint(BBs[i]); |
| AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
| Ptr, NewVal, Orders[i]); |
| RMW->setVolatile(Volatile); |
| Result->addIncoming(RMW, BBs[i]); |
| Builder.CreateBr(ContBB); |
| } |
| |
| SI->addCase(Builder.getInt32(0), BBs[0]); |
| SI->addCase(Builder.getInt32(1), BBs[1]); |
| SI->addCase(Builder.getInt32(2), BBs[1]); |
| SI->addCase(Builder.getInt32(3), BBs[2]); |
| SI->addCase(Builder.getInt32(4), BBs[3]); |
| SI->addCase(Builder.getInt32(5), BBs[4]); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
| } |
| |
| case Builtin::BI__atomic_clear: { |
| QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
| bool Volatile = |
| PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
| |
| Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
| unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); |
| Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
| Value *NewVal = Builder.getInt8(0); |
| Value *Order = EmitScalarExpr(E->getArg(1)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
| break; |
| case 3: // memory_order_release |
| Store->setOrdering(llvm::AtomicOrdering::Release); |
| break; |
| case 5: // memory_order_seq_cst |
| Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
| break; |
| } |
| return RValue::get(nullptr); |
| } |
| |
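// Non-constant ordering: dispatch over the three orderings that are valid
// for an atomic store (relaxed, release, seq_cst); anything else defaults to
// the monotonic block.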
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| llvm::BasicBlock *BBs[3] = { |
| createBasicBlock("monotonic", CurFn), |
| createBasicBlock("release", CurFn), |
| createBasicBlock("seqcst", CurFn) |
| }; |
| llvm::AtomicOrdering Orders[3] = { |
| llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
| llvm::AtomicOrdering::SequentiallyConsistent}; |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
| |
| for (unsigned i = 0; i < 3; ++i) { |
| Builder.SetInsertPoint(BBs[i]); |
| StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
| Store->setOrdering(Orders[i]); |
| Builder.CreateBr(ContBB); |
| } |
| |
| SI->addCase(Builder.getInt32(0), BBs[0]); |
| SI->addCase(Builder.getInt32(3), BBs[1]); |
| SI->addCase(Builder.getInt32(5), BBs[2]); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__atomic_thread_fence: |
| case Builtin::BI__atomic_signal_fence: |
| case Builtin::BI__c11_atomic_thread_fence: |
| case Builtin::BI__c11_atomic_signal_fence: { |
| llvm::SynchronizationScope Scope; |
| if (BuiltinID == Builtin::BI__atomic_signal_fence || |
| BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
| Scope = llvm::SingleThread; |
| else |
| Scope = llvm::CrossThread; |
| Value *Order = EmitScalarExpr(E->getArg(0)); |
| if (isa<llvm::ConstantInt>(Order)) { |
| int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
| switch (ord) { |
| case 0: // memory_order_relaxed |
| default: // invalid order |
| break; |
| case 1: // memory_order_consume |
| case 2: // memory_order_acquire |
| Builder.CreateFence(llvm::AtomicOrdering::Acquire, Scope); |
| break; |
| case 3: // memory_order_release |
| Builder.CreateFence(llvm::AtomicOrdering::Release, Scope); |
| break; |
| case 4: // memory_order_acq_rel |
| Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, Scope); |
| break; |
| case 5: // memory_order_seq_cst |
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
| Scope); |
| break; |
| } |
| return RValue::get(nullptr); |
| } |
| |
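// Dynamic ordering: branch to a block that emits the matching fence;
// relaxed (0) and invalid values take the switch default straight to the
// continuation, emitting no fence at all.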
| llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
| AcquireBB = createBasicBlock("acquire", CurFn); |
| ReleaseBB = createBasicBlock("release", CurFn); |
| AcqRelBB = createBasicBlock("acqrel", CurFn); |
| SeqCstBB = createBasicBlock("seqcst", CurFn); |
| llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
| |
| Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
| llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
| |
| Builder.SetInsertPoint(AcquireBB); |
| Builder.CreateFence(llvm::AtomicOrdering::Acquire, Scope); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(1), AcquireBB); |
| SI->addCase(Builder.getInt32(2), AcquireBB); |
| |
| Builder.SetInsertPoint(ReleaseBB); |
| Builder.CreateFence(llvm::AtomicOrdering::Release, Scope); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(3), ReleaseBB); |
| |
| Builder.SetInsertPoint(AcqRelBB); |
| Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, Scope); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(4), AcqRelBB); |
| |
| Builder.SetInsertPoint(SeqCstBB); |
| Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, Scope); |
| Builder.CreateBr(ContBB); |
| SI->addCase(Builder.getInt32(5), SeqCstBB); |
| |
| Builder.SetInsertPoint(ContBB); |
| return RValue::get(nullptr); |
| } |
| |
| // Library functions with special handling. |
| case Builtin::BIsqrt: |
| case Builtin::BIsqrtf: |
| case Builtin::BIsqrtl: { |
| // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only |
| // in finite- or unsafe-math mode (the intrinsic has different semantics |
| // for handling negative numbers compared to the library function, so |
| // -fmath-errno=0 is not enough). |
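// For example, under -ffast-math a call to sqrtf(x) becomes
//   %0 = call float @llvm.sqrt.f32(float %x)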
| if (!FD->hasAttr<ConstAttr>()) |
| break; |
| if (!(CGM.getCodeGenOpts().UnsafeFPMath || |
| CGM.getCodeGenOpts().NoNaNsFPMath)) |
| break; |
| Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = Arg0->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType); |
| return RValue::get(Builder.CreateCall(F, Arg0)); |
| } |
| |
| case Builtin::BI__builtin_pow: |
| case Builtin::BI__builtin_powf: |
| case Builtin::BI__builtin_powl: |
| case Builtin::BIpow: |
| case Builtin::BIpowf: |
| case Builtin::BIpowl: { |
| // Transform a call to pow* into a @llvm.pow.* intrinsic call. |
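// e.g. pow(x, y) becomes
//   %0 = call double @llvm.pow.f64(double %x, double %y)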
| if (!FD->hasAttr<ConstAttr>()) |
| break; |
| Value *Base = EmitScalarExpr(E->getArg(0)); |
| Value *Exponent = EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ArgType = Base->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType); |
| return RValue::get(Builder.CreateCall(F, {Base, Exponent})); |
| } |
| |
| case Builtin::BIfma: |
| case Builtin::BIfmaf: |
| case Builtin::BIfmal: |
| case Builtin::BI__builtin_fma: |
| case Builtin::BI__builtin_fmaf: |
| case Builtin::BI__builtin_fmal: { |
| // Rewrite fma to intrinsic. |
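// e.g. fma(a, b, c) becomes
//   %0 = call double @llvm.fma.f64(double %a, double %b, double %c)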
| Value *FirstArg = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = FirstArg->getType(); |
| Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType); |
| return RValue::get( |
| Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)), |
| EmitScalarExpr(E->getArg(2))})); |
| } |
| |
| case Builtin::BI__builtin_signbit: |
| case Builtin::BI__builtin_signbitf: |
| case Builtin::BI__builtin_signbitl: { |
| return RValue::get( |
| Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
| ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_annotation: { |
| llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
| AnnVal->getType()); |
| |
// Get the annotation string, looking through casts. Sema requires this to be
// a non-wide string literal, potentially cast, so the cast<> is safe.
| const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
| StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
| return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc())); |
| } |
| case Builtin::BI__builtin_addcb: |
| case Builtin::BI__builtin_addcs: |
| case Builtin::BI__builtin_addc: |
| case Builtin::BI__builtin_addcl: |
| case Builtin::BI__builtin_addcll: |
| case Builtin::BI__builtin_subcb: |
| case Builtin::BI__builtin_subcs: |
| case Builtin::BI__builtin_subc: |
| case Builtin::BI__builtin_subcl: |
| case Builtin::BI__builtin_subcll: { |
| |
| // We translate all of these builtins from expressions of the form: |
| // int x = ..., y = ..., carryin = ..., carryout, result; |
| // result = __builtin_addc(x, y, carryin, &carryout); |
| // |
| // to LLVM IR of the form: |
| // |
| // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
| // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
| // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
| // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
| // i32 %carryin) |
| // %result = extractvalue {i32, i1} %tmp2, 0 |
| // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
| // %tmp3 = or i1 %carry1, %carry2 |
| // %tmp4 = zext i1 %tmp3 to i32 |
| // store i32 %tmp4, i32* %carryout |
| |
| // Scalarize our inputs. |
| llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
| Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
| |
| // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
| llvm::Intrinsic::ID IntrinsicId; |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown multiprecision builtin id."); |
| case Builtin::BI__builtin_addcb: |
| case Builtin::BI__builtin_addcs: |
| case Builtin::BI__builtin_addc: |
| case Builtin::BI__builtin_addcl: |
| case Builtin::BI__builtin_addcll: |
| IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
| break; |
| case Builtin::BI__builtin_subcb: |
| case Builtin::BI__builtin_subcs: |
| case Builtin::BI__builtin_subc: |
| case Builtin::BI__builtin_subcl: |
| case Builtin::BI__builtin_subcll: |
| IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
| break; |
| } |
| |
| // Construct our resulting LLVM IR expression. |
| llvm::Value *Carry1; |
| llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
| X, Y, Carry1); |
| llvm::Value *Carry2; |
| llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
| Sum1, Carryin, Carry2); |
| llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
| X->getType()); |
| Builder.CreateStore(CarryOut, CarryOutPtr); |
| return RValue::get(Sum2); |
| } |
| |
| case Builtin::BI__builtin_add_overflow: |
| case Builtin::BI__builtin_sub_overflow: |
| case Builtin::BI__builtin_mul_overflow: { |
| const clang::Expr *LeftArg = E->getArg(0); |
| const clang::Expr *RightArg = E->getArg(1); |
| const clang::Expr *ResultArg = E->getArg(2); |
| |
| clang::QualType ResultQTy = |
| ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
| |
| WidthAndSignedness LeftInfo = |
| getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); |
| WidthAndSignedness RightInfo = |
| getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); |
| WidthAndSignedness ResultInfo = |
| getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); |
| WidthAndSignedness EncompassingInfo = |
| EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); |
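// For example, __builtin_add_overflow with int, unsigned and long* operands
// (on an LP64 target) computes in a signed 64-bit encompassing type, the
// smallest type that can represent every value of all three operand types.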
| |
| llvm::Type *EncompassingLLVMTy = |
| llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); |
| |
| llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); |
| |
| llvm::Intrinsic::ID IntrinsicId; |
| switch (BuiltinID) { |
| default: |
| llvm_unreachable("Unknown overflow builtin id."); |
| case Builtin::BI__builtin_add_overflow: |
| IntrinsicId = EncompassingInfo.Signed |
| ? llvm::Intrinsic::sadd_with_overflow |
| : llvm::Intrinsic::uadd_with_overflow; |
| break; |
| case Builtin::BI__builtin_sub_overflow: |
| IntrinsicId = EncompassingInfo.Signed |
| ? llvm::Intrinsic::ssub_with_overflow |
| : llvm::Intrinsic::usub_with_overflow; |
| break; |
| case Builtin::BI__builtin_mul_overflow: |
| IntrinsicId = EncompassingInfo.Signed |
| ? llvm::Intrinsic::smul_with_overflow |
| : llvm::Intrinsic::umul_with_overflow; |
| break; |
| } |
| |
| llvm::Value *Left = EmitScalarExpr(LeftArg); |
| llvm::Value *Right = EmitScalarExpr(RightArg); |
| Address ResultPtr = EmitPointerWithAlignment(ResultArg); |
| |
| // Extend each operand to the encompassing type. |
| Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); |
| Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); |
| |
| // Perform the operation on the extended values. |
| llvm::Value *Overflow, *Result; |
| Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); |
| |
| if (EncompassingInfo.Width > ResultInfo.Width) { |
| // The encompassing type is wider than the result type, so we need to |
| // truncate it. |
| llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); |
| |
| // To see if the truncation caused an overflow, we will extend |
| // the result and then compare it to the original result. |
| llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
| ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); |
| llvm::Value *TruncationOverflow = |
| Builder.CreateICmpNE(Result, ResultTruncExt); |
| |
| Overflow = Builder.CreateOr(Overflow, TruncationOverflow); |
| Result = ResultTrunc; |
| } |
| |
| // Finally, store the result using the pointer. |
| bool isVolatile = |
| ResultArg->getType()->getPointeeType().isVolatileQualified(); |
| Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); |
| |
| return RValue::get(Overflow); |
| } |
| |
| case Builtin::BI__builtin_uadd_overflow: |
| case Builtin::BI__builtin_uaddl_overflow: |
| case Builtin::BI__builtin_uaddll_overflow: |
| case Builtin::BI__builtin_usub_overflow: |
| case Builtin::BI__builtin_usubl_overflow: |
| case Builtin::BI__builtin_usubll_overflow: |
| case Builtin::BI__builtin_umul_overflow: |
| case Builtin::BI__builtin_umull_overflow: |
| case Builtin::BI__builtin_umulll_overflow: |
| case Builtin::BI__builtin_sadd_overflow: |
| case Builtin::BI__builtin_saddl_overflow: |
| case Builtin::BI__builtin_saddll_overflow: |
| case Builtin::BI__builtin_ssub_overflow: |
| case Builtin::BI__builtin_ssubl_overflow: |
| case Builtin::BI__builtin_ssubll_overflow: |
| case Builtin::BI__builtin_smul_overflow: |
| case Builtin::BI__builtin_smull_overflow: |
| case Builtin::BI__builtin_smulll_overflow: { |
| |
// We translate all of these builtins directly to the relevant LLVM IR node.
| |
| // Scalarize our inputs. |
| llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
| Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
| |
| // Decide which of the overflow intrinsics we are lowering to: |
| llvm::Intrinsic::ID IntrinsicId; |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown overflow builtin id."); |
| case Builtin::BI__builtin_uadd_overflow: |
| case Builtin::BI__builtin_uaddl_overflow: |
| case Builtin::BI__builtin_uaddll_overflow: |
| IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
| break; |
| case Builtin::BI__builtin_usub_overflow: |
| case Builtin::BI__builtin_usubl_overflow: |
| case Builtin::BI__builtin_usubll_overflow: |
| IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
| break; |
| case Builtin::BI__builtin_umul_overflow: |
| case Builtin::BI__builtin_umull_overflow: |
| case Builtin::BI__builtin_umulll_overflow: |
| IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
| break; |
| case Builtin::BI__builtin_sadd_overflow: |
| case Builtin::BI__builtin_saddl_overflow: |
| case Builtin::BI__builtin_saddll_overflow: |
| IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
| break; |
| case Builtin::BI__builtin_ssub_overflow: |
| case Builtin::BI__builtin_ssubl_overflow: |
| case Builtin::BI__builtin_ssubll_overflow: |
| IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
| break; |
| case Builtin::BI__builtin_smul_overflow: |
| case Builtin::BI__builtin_smull_overflow: |
| case Builtin::BI__builtin_smulll_overflow: |
| IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
| break; |
| } |
| |
| |
| llvm::Value *Carry; |
| llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); |
| Builder.CreateStore(Sum, SumOutPtr); |
| |
| return RValue::get(Carry); |
| } |
| case Builtin::BI__builtin_addressof: |
| return RValue::get(EmitLValue(E->getArg(0)).getPointer()); |
| case Builtin::BI__builtin_operator_new: |
| return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(), |
| E->getArg(0), false); |
| case Builtin::BI__builtin_operator_delete: |
| return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(), |
| E->getArg(0), true); |
| case Builtin::BI__noop: |
| // __noop always evaluates to an integer literal zero. |
| return RValue::get(ConstantInt::get(IntTy, 0)); |
| case Builtin::BI__builtin_call_with_static_chain: { |
| const CallExpr *Call = cast<CallExpr>(E->getArg(0)); |
| const Expr *Chain = E->getArg(1); |
| return EmitCall(Call->getCallee()->getType(), |
| EmitScalarExpr(Call->getCallee()), Call, ReturnValue, |
| Call->getCalleeDecl(), EmitScalarExpr(Chain)); |
| } |
| case Builtin::BI_InterlockedExchange: |
| case Builtin::BI_InterlockedExchangePointer: |
| return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
| case Builtin::BI_InterlockedCompareExchangePointer: { |
| llvm::Type *RTy; |
| llvm::IntegerType *IntType = |
| IntegerType::get(getLLVMContext(), |
| getContext().getTypeSize(E->getType())); |
| llvm::Type *IntPtrType = IntType->getPointerTo(); |
| |
| llvm::Value *Destination = |
| Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); |
| |
| llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); |
| RTy = Exchange->getType(); |
| Exchange = Builder.CreatePtrToInt(Exchange, IntType); |
| |
| llvm::Value *Comparand = |
| Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); |
| |
| auto Result = |
| Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
| AtomicOrdering::SequentiallyConsistent, |
| AtomicOrdering::SequentiallyConsistent); |
| Result->setVolatile(true); |
| |
| return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, |
| 0), |
| RTy)); |
| } |
| case Builtin::BI_InterlockedCompareExchange: { |
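// _InterlockedCompareExchange takes (Destination, Exchange, Comparand), but
// LLVM's cmpxchg operands are (ptr, compare, new), so arguments 1 and 2
// swap places in the call below.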
| AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg( |
| EmitScalarExpr(E->getArg(0)), |
| EmitScalarExpr(E->getArg(2)), |
| EmitScalarExpr(E->getArg(1)), |
| AtomicOrdering::SequentiallyConsistent, |
| AtomicOrdering::SequentiallyConsistent); |
| CXI->setVolatile(true); |
| return RValue::get(Builder.CreateExtractValue(CXI, 0)); |
| } |
| case Builtin::BI_InterlockedIncrement: { |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Add, |
| EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| RMWI->setVolatile(true); |
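// atomicrmw yields the value from *before* the addition, but
// _InterlockedIncrement returns the incremented value, so add 1 here.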
| return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1))); |
| } |
| case Builtin::BI_InterlockedDecrement: { |
| llvm::Type *IntTy = ConvertType(E->getType()); |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Sub, |
| EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| RMWI->setVolatile(true); |
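// As above, atomicrmw yields the pre-decrement value, so subtract 1 to
// return the decremented value _InterlockedDecrement promises.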
| return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1))); |
| } |
| case Builtin::BI_InterlockedExchangeAdd: { |
| AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
| AtomicRMWInst::Add, |
| EmitScalarExpr(E->getArg(0)), |
| EmitScalarExpr(E->getArg(1)), |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| RMWI->setVolatile(true); |
| return RValue::get(RMWI); |
| } |
| case Builtin::BI__readfsdword: { |
| llvm::Type *IntTy = ConvertType(E->getType()); |
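// Address space 257 is x86's FS segment, so the load below reads the
// dword at FS:[offset].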
| Value *IntToPtr = |
| Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)), |
| llvm::PointerType::get(IntTy, 257)); |
| LoadInst *Load = |
| Builder.CreateDefaultAlignedLoad(IntToPtr, /*isVolatile=*/true); |
| return RValue::get(Load); |
| } |
| |
| case Builtin::BI__exception_code: |
| case Builtin::BI_exception_code: |
| return RValue::get(EmitSEHExceptionCode()); |
| case Builtin::BI__exception_info: |
| case Builtin::BI_exception_info: |
| return RValue::get(EmitSEHExceptionInfo()); |
| case Builtin::BI__abnormal_termination: |
| case Builtin::BI_abnormal_termination: |
| return RValue::get(EmitSEHAbnormalTermination()); |
| case Builtin::BI_setjmpex: { |
| if (getTarget().getTriple().isOSMSVCRT()) { |
| llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy}; |
| llvm::AttributeSet ReturnsTwiceAttr = |
| AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, |
| llvm::Attribute::ReturnsTwice); |
| llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction( |
| llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), |
| "_setjmpex", ReturnsTwiceAttr); |
| llvm::Value *Buf = Builder.CreateBitOrPointerCast( |
| EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
| llvm::Value *FrameAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), |
| ConstantInt::get(Int32Ty, 0)); |
| llvm::Value *Args[] = {Buf, FrameAddr}; |
| llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args); |
| CS.setAttributes(ReturnsTwiceAttr); |
| return RValue::get(CS.getInstruction()); |
| } |
| break; |
| } |
| case Builtin::BI_setjmp: { |
| if (getTarget().getTriple().isOSMSVCRT()) { |
| llvm::AttributeSet ReturnsTwiceAttr = |
| AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, |
| llvm::Attribute::ReturnsTwice); |
| llvm::Value *Buf = Builder.CreateBitOrPointerCast( |
| EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
| llvm::CallSite CS; |
| if (getTarget().getTriple().getArch() == llvm::Triple::x86) { |
| llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy}; |
| llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction( |
| llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true), |
| "_setjmp3", ReturnsTwiceAttr); |
| llvm::Value *Count = ConstantInt::get(IntTy, 0); |
| llvm::Value *Args[] = {Buf, Count}; |
| CS = EmitRuntimeCallOrInvoke(SetJmp3, Args); |
| } else { |
| llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy}; |
| llvm::Constant *SetJmp = CGM.CreateRuntimeFunction( |
| llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), |
| "_setjmp", ReturnsTwiceAttr); |
| llvm::Value *FrameAddr = |
| Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), |
| ConstantInt::get(Int32Ty, 0)); |
| llvm::Value *Args[] = {Buf, FrameAddr}; |
| CS = EmitRuntimeCallOrInvoke(SetJmp, Args); |
| } |
| CS.setAttributes(ReturnsTwiceAttr); |
| return RValue::get(CS.getInstruction()); |
| } |
| break; |
| } |
| |
| case Builtin::BI__GetExceptionInfo: { |
| if (llvm::GlobalVariable *GV = |
| CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) |
| return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); |
| break; |
| } |
| case Builtin::BI__builtin_os_log_format: { |
| assert(E->getNumArgs() >= 2 && |
| "__builtin_os_log_format takes at least 2 arguments"); |
| analyze_os_log::OSLogBufferLayout Layout; |
| analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout); |
| Address BufAddr = EmitPointerWithAlignment(E->getArg(0)); |
| // Ignore argument 1, the format string. It is not currently used. |
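// The buffer is filled in with the layout:
//   [summary byte][num-args byte]([descriptor][size][data])*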
| CharUnits offset; |
| Builder.CreateStore( |
| Builder.getInt8(Layout.getSummaryByte()), |
| Builder.CreateConstByteGEP(BufAddr, offset++, "summary")); |
| Builder.CreateStore( |
| Builder.getInt8(Layout.getNumArgsByte()), |
| Builder.CreateConstByteGEP(BufAddr, offset++, "numArgs")); |
| |
| llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
| for (const auto &item : Layout.Items) { |
| Builder.CreateStore( |
| Builder.getInt8(item.getDescriptorByte()), |
| Builder.CreateConstByteGEP(BufAddr, offset++, "argDescriptor")); |
| Builder.CreateStore( |
| Builder.getInt8(item.getSizeByte()), |
| Builder.CreateConstByteGEP(BufAddr, offset++, "argSize")); |
| Address addr = Builder.CreateConstByteGEP(BufAddr, offset); |
| if (const Expr *expr = item.getExpr()) { |
| addr = Builder.CreateElementBitCast(addr, |
| ConvertTypeForMem(expr->getType())); |
| // Check if this is a retainable type. |
| if (expr->getType()->isObjCRetainableType()) { |
assert(getEvaluationKind(expr->getType()) == TEK_Scalar &&
"Only scalars can be ObjC retainable types");
| llvm::Value *SV = EmitScalarExpr(expr, /*Ignore*/ false); |
| RValue RV = RValue::get(SV); |
| LValue LV = MakeAddrLValue(addr, expr->getType()); |
| EmitStoreThroughLValue(RV, LV); |
| // Check if the object is constant, if not, save it in |
| // RetainableOperands. |
| if (!isa<Constant>(SV)) |
| RetainableOperands.push_back(SV); |
| } else { |
| EmitAnyExprToMem(expr, addr, Qualifiers(), /*isInit*/true); |
| } |
| } else { |
| addr = Builder.CreateElementBitCast(addr, Int32Ty); |
| Builder.CreateStore( |
| Builder.getInt32(item.getConstValue().getQuantity()), addr); |
| } |
| offset += item.getSize(); |
| } |
| |
| // Push a clang.arc.use cleanup for each object in RetainableOperands. The |
| // cleanup will cause the use to appear after the final log call, keeping |
// the object valid while it's held in the log buffer. Note that if there's
| // a release cleanup on the object, it will already be active; since |
| // cleanups are emitted in reverse order, the use will occur before the |
| // object is released. |
| if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount && |
| CGM.getCodeGenOpts().OptimizationLevel != 0) |
| for (llvm::Value *object : RetainableOperands) |
| pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), object); |
| |
| return RValue::get(BufAddr.getPointer()); |
| } |
| |
| case Builtin::BI__builtin_os_log_format_buffer_size: { |
| analyze_os_log::OSLogBufferLayout Layout; |
| analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout); |
| return RValue::get(ConstantInt::get(ConvertType(E->getType()), |
| Layout.getSize().getQuantity())); |
| } |
| |
| // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions |
| case Builtin::BIread_pipe: |
| case Builtin::BIwrite_pipe: { |
| Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
| *Arg1 = EmitScalarExpr(E->getArg(1)); |
| |
| // Type of the generic packet parameter. |
| unsigned GenericAS = |
| getContext().getTargetAddressSpace(LangAS::opencl_generic); |
| llvm::Type *I8PTy = llvm::PointerType::get( |
| llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); |
| |
// Determine which overloaded version of the call to generate.
| if (2U == E->getNumArgs()) { |
| const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
| : "__write_pipe_2"; |
// Create a generic function type so the call works with any builtin or
// user-defined type.
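// e.g. read_pipe(p, ptr) is emitted roughly as
//   %r = call i32 @__read_pipe_2(%pipe %p, i8 addrspace(4)* %cast)
// where %pipe stands for whatever type the pipe argument lowers to.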
| llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy}; |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
| Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); |
| return RValue::get(Builder.CreateCall( |
| CGM.CreateRuntimeFunction(FTy, Name), {Arg0, BCast})); |
| } else { |
| assert(4 == E->getNumArgs() && |
| "Illegal number of parameters to pipe function"); |
| const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
| : "__write_pipe_4"; |
| |
| llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy}; |
| Value *Arg2 = EmitScalarExpr(E->getArg(2)), |
| *Arg3 = EmitScalarExpr(E->getArg(3)); |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
| Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); |
| // We know the third argument is an integer type, but we may need to cast |
| // it to i32. |
| if (Arg2->getType() != Int32Ty) |
| Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); |
| return RValue::get(Builder.CreateCall( |
| CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1, Arg2, BCast})); |
| } |
| } |
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
// functions
| case Builtin::BIreserve_read_pipe: |
| case Builtin::BIreserve_write_pipe: |
| case Builtin::BIwork_group_reserve_read_pipe: |
| case Builtin::BIwork_group_reserve_write_pipe: |
| case Builtin::BIsub_group_reserve_read_pipe: |
| case Builtin::BIsub_group_reserve_write_pipe: { |
// Pick the runtime function name for this builtin.
| const char *Name; |
| if (BuiltinID == Builtin::BIreserve_read_pipe) |
| Name = "__reserve_read_pipe"; |
| else if (BuiltinID == Builtin::BIreserve_write_pipe) |
| Name = "__reserve_write_pipe"; |
| else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
| Name = "__work_group_reserve_read_pipe"; |
| else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
| Name = "__work_group_reserve_write_pipe"; |
| else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
| Name = "__sub_group_reserve_read_pipe"; |
| else |
| Name = "__sub_group_reserve_write_pipe"; |
| |
| Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
| *Arg1 = EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); |
| |
// Build the generic function prototype.
| llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty}; |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
| // We know the second argument is an integer type, but we may need to cast |
| // it to i32. |
| if (Arg1->getType() != Int32Ty) |
| Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); |
| return RValue::get( |
| Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1})); |
| } |
| // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write |
| // functions |
| case Builtin::BIcommit_read_pipe: |
| case Builtin::BIcommit_write_pipe: |
| case Builtin::BIwork_group_commit_read_pipe: |
| case Builtin::BIwork_group_commit_write_pipe: |
| case Builtin::BIsub_group_commit_read_pipe: |
| case Builtin::BIsub_group_commit_write_pipe: { |
| const char *Name; |
| if (BuiltinID == Builtin::BIcommit_read_pipe) |
| Name = "__commit_read_pipe"; |
| else if (BuiltinID == Builtin::BIcommit_write_pipe) |
| Name = "__commit_write_pipe"; |
| else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
| Name = "__work_group_commit_read_pipe"; |
| else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
| Name = "__work_group_commit_write_pipe"; |
| else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
| Name = "__sub_group_commit_read_pipe"; |
| else |
| Name = "__sub_group_commit_write_pipe"; |
| |
| Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
| *Arg1 = EmitScalarExpr(E->getArg(1)); |
| |
// Build the generic function prototype.
| llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType()}; |
| llvm::FunctionType *FTy = |
| llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), |
| llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
| |
| return RValue::get( |
| Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1})); |
| } |
| // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions |
| case Builtin::BIget_pipe_num_packets: |
| case Builtin::BIget_pipe_max_packets: { |
| const char *Name; |
| if (BuiltinID == Builtin::BIget_pipe_num_packets) |
| Name = "__get_pipe_num_packets"; |
| else |
| Name = "__get_pipe_max_packets"; |
| |
// Build the generic function prototype.
| Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgTys[] = {Arg0->getType()}; |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
| |
| return RValue::get( |
| Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0})); |
| } |
| |
| // OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
| case Builtin::BIto_global: |
| case Builtin::BIto_local: |
| case Builtin::BIto_private: { |
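// Each of these lowers to a runtime call named "__" plus the builtin's own
// name (e.g. to_global -> __to_global) that takes and returns i8*, with
// address-space/pointer casts around the call as needed.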
| auto Arg0 = EmitScalarExpr(E->getArg(0)); |
| auto NewArgT = llvm::PointerType::get(Int8Ty, |
| CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
| auto NewRetT = llvm::PointerType::get(Int8Ty, |
| CGM.getContext().getTargetAddressSpace( |
| E->getType()->getPointeeType().getAddressSpace())); |
| auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); |
| llvm::Value *NewArg; |
| if (Arg0->getType()->getPointerAddressSpace() != |
| NewArgT->getPointerAddressSpace()) |
| NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); |
| else |
| NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); |
| auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); |
| auto NewCall = |
| Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); |
| return RValue::get(Builder.CreateBitOrPointerCast(NewCall, |
| ConvertType(E->getType()))); |
| } |
| |
| // OpenCL v2.0, s6.13.17 - Enqueue kernel function. |
| // It contains four different overload formats specified in Table 6.13.17.1. |
| case Builtin::BIenqueue_kernel: { |
| StringRef Name; // Generated function call name |
| unsigned NumArgs = E->getNumArgs(); |
| |
| llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); |
| llvm::Type *RangeTy = ConvertType(getContext().OCLNDRangeTy); |
| |
| llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Range = EmitScalarExpr(E->getArg(2)); |
| |
| if (NumArgs == 4) { |
| // The most basic form of the call with parameters: |
| // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) |
| Name = "__enqueue_kernel_basic"; |
| llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, Int8PtrTy}; |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys, 4), false); |
| |
| llvm::Value *Block = |
| Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy); |
| |
| return RValue::get(Builder.CreateCall( |
| CGM.CreateRuntimeFunction(FTy, Name), {Queue, Flags, Range, Block})); |
| } |
| assert(NumArgs >= 5 && "Invalid enqueue_kernel signature"); |
| |
// Could have events and/or variadic arguments.
| if (E->getArg(3)->getType()->isBlockPointerType()) { |
// No events are passed, but the call has variadic arguments.
| Name = "__enqueue_kernel_vaargs"; |
| llvm::Value *Block = |
| Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy); |
| // Create a vector of the arguments, as well as a constant value to |
| // express to the runtime the number of variadic arguments. |
| std::vector<llvm::Value *> Args = {Queue, Flags, Range, Block, |
| ConstantInt::get(IntTy, NumArgs - 4)}; |
| std::vector<llvm::Type *> ArgTys = {QueueTy, IntTy, RangeTy, Int8PtrTy, |
| IntTy}; |
| |
| // Add the variadics. |
| for (unsigned I = 4; I < NumArgs; ++I) { |
| llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I)); |
| unsigned TypeSizeInBytes = |
| getContext() |
| .getTypeSizeInChars(E->getArg(I)->getType()) |
| .getQuantity(); |
| Args.push_back(TypeSizeInBytes < 4 |
| ? Builder.CreateZExt(ArgSize, Int32Ty) |
| : ArgSize); |
| } |
| |
| llvm::FunctionType *FTy = llvm::FunctionType::get( |
| Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), true); |
| return RValue::get( |
| Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
| llvm::ArrayRef<llvm::Value *>(Args))); |
| } |
// All remaining forms take event arguments.
| if (NumArgs >= 7) { |
| llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); |
| unsigned AS4 = |
| E->getArg(4)->getType()->isArrayType() |
| ? E->getArg(4)->getType().getAddressSpace() |
| : E->getArg(4)->getType()->getPointeeType().getAddressSpace(); |
| llvm::Type *EventPtrAS4Ty = |
| EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS4)); |
| unsigned AS5 = |
| E->getArg(5)->getType()->getPointeeType().getAddressSpace(); |
| llvm::Type *EventPtrAS5Ty = |
| EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS5)); |
| |
|