| //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This contains code to emit Builtin calls as LLVM code. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "CGCXXABI.h" |
| #include "CGObjCRuntime.h" |
| #include "CGOpenCLRuntime.h" |
| #include "CGRecordLayout.h" |
| #include "CodeGenFunction.h" |
| #include "CodeGenModule.h" |
| #include "ConstantEmitter.h" |
| #include "PatternInit.h" |
| #include "TargetInfo.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/Attr.h" |
| #include "clang/AST/Decl.h" |
| #include "clang/AST/OSLog.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/CodeGen/CGFunctionInfo.h" |
| #include "llvm/ADT/SmallPtrSet.h" |
| #include "llvm/ADT/StringExtras.h" |
| #include "llvm/Analysis/ValueTracking.h" |
| #include "llvm/IR/DataLayout.h" |
| #include "llvm/IR/InlineAsm.h" |
| #include "llvm/IR/Intrinsics.h" |
| #include "llvm/IR/IntrinsicsAArch64.h" |
| #include "llvm/IR/IntrinsicsAMDGPU.h" |
| #include "llvm/IR/IntrinsicsARM.h" |
| #include "llvm/IR/IntrinsicsBPF.h" |
| #include "llvm/IR/IntrinsicsHexagon.h" |
| #include "llvm/IR/IntrinsicsNVPTX.h" |
| #include "llvm/IR/IntrinsicsPowerPC.h" |
| #include "llvm/IR/IntrinsicsR600.h" |
| #include "llvm/IR/IntrinsicsS390.h" |
| #include "llvm/IR/IntrinsicsWebAssembly.h" |
| #include "llvm/IR/IntrinsicsX86.h" |
| #include "llvm/IR/MDBuilder.h" |
| #include "llvm/IR/MatrixBuilder.h" |
| #include "llvm/Support/ConvertUTF.h" |
| #include "llvm/Support/ScopedPrinter.h" |
| #include "llvm/Support/X86TargetParser.h" |
| #include <sstream> |
| |
| using namespace clang; |
| using namespace CodeGen; |
| using namespace llvm; |
| |
| static |
| int64_t clamp(int64_t Value, int64_t Low, int64_t High) { |
| return std::min(High, std::max(Low, Value)); |
| } |
| |
| static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, |
| Align AlignmentInBytes) { |
| ConstantInt *Byte; |
| switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
| case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
| // Nothing to initialize. |
| return; |
| case LangOptions::TrivialAutoVarInitKind::Zero: |
| Byte = CGF.Builder.getInt8(0x00); |
| break; |
| case LangOptions::TrivialAutoVarInitKind::Pattern: { |
| llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext()); |
| Byte = llvm::dyn_cast<llvm::ConstantInt>( |
| initializationPatternFor(CGF.CGM, Int8)); |
| break; |
| } |
| } |
| if (CGF.CGM.stopAutoInit()) |
| return; |
| CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes); |
| } |
| |
| /// getBuiltinLibFunction - Given a builtin id for a function like |
| /// "__builtin_fabsf", return a Function* for "fabsf". |
| llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
| unsigned BuiltinID) { |
| assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
| |
| // Get the name, skip over the __builtin_ prefix (if necessary). |
| StringRef Name; |
| GlobalDecl D(FD); |
| |
| // If the builtin has been declared explicitly with an assembler label, |
| // use the mangled name. This differs from the plain label on platforms |
| // that prefix labels. |
| if (FD->hasAttr<AsmLabelAttr>()) |
| Name = getMangledName(D); |
| else |
| Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
| |
| llvm::FunctionType *Ty = |
| cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
| |
| return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
| } |
| |
| /// Emit the conversions required to turn the given value into an |
| /// integer of the given size. |
| static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::IntegerType *IntType) { |
| V = CGF.EmitToMemory(V, T); |
| |
| if (V->getType()->isPointerTy()) |
| return CGF.Builder.CreatePtrToInt(V, IntType); |
| |
| assert(V->getType() == IntType); |
| return V; |
| } |
| |
| static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
| QualType T, llvm::Type *ResultType) { |
| V = CGF.EmitFromMemory(V, T); |
| |
| if (ResultType->isPointerTy()) |
| return CGF.Builder.CreateIntToPtr(V, ResultType); |
| |
| assert(V->getType() == ResultType); |
| return V; |
| } |
| |
| /// Utility to insert an atomic instruction based on Intrinsic::ID |
| /// and the expression node. |
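| /// |
| /// As a rough sketch (value names are illustrative), a call such as |
| ///   __sync_fetch_and_add(&x, 1) |
| /// with Kind == AtomicRMWInst::Add is lowered to |
| ///   %old = atomicrmw add i32* %x, i32 1 seq_cst |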
| static Value *MakeBinaryAtomicValue( |
| CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
| AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], Ordering); |
| return EmitFromInt(CGF, Result, T, ValueType); |
| } |
| |
| static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
| Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| // Convert the type of the pointer to a pointer to the stored type. |
| Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
| Value *BC = CGF.Builder.CreateBitCast( |
| Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
| LV.setNontemporal(true); |
| CGF.EmitStoreOfScalar(Val, LV, false); |
| return nullptr; |
| } |
| |
| static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
| LV.setNontemporal(true); |
| return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
| } |
| |
| static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E) { |
| return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
| } |
| |
| /// Utility to insert an atomic instruction based on Intrinsic::ID and |
| /// the expression node, where the return value is the result of the |
| /// operation. |
| static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
| llvm::AtomicRMWInst::BinOp Kind, |
| const CallExpr *E, |
| Instruction::BinaryOps Op, |
| bool Invert = false) { |
| QualType T = E->getType(); |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, |
| E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
| |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = |
| llvm::IntegerType::get(CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| llvm::Value *Args[2]; |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| |
| llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
| Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
| Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
| if (Invert) |
| Result = |
| CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
| llvm::ConstantInt::getAllOnesValue(IntType)); |
| Result = EmitFromInt(CGF, Result, T, ValueType); |
| return RValue::get(Result); |
| } |
| |
| /// Utility to insert an atomic cmpxchg instruction. |
| /// |
| /// @param CGF The current codegen function. |
| /// @param E Builtin call expression to convert to cmpxchg. |
| /// arg0 - address to operate on |
| /// arg1 - value to compare with |
| /// arg2 - new value |
| /// @param ReturnBool Specifies whether to return success flag of |
| /// cmpxchg result or the old value. |
| /// |
| /// @returns result of cmpxchg, according to ReturnBool |
| /// |
| /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics, |
| /// invoke the function EmitAtomicCmpXchgForMSIntrin instead. |
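| /// |
| /// As a rough sketch (value names are illustrative), |
| ///   __sync_val_compare_and_swap(&x, old, new) |
| /// becomes |
| ///   %pair = cmpxchg i32* %x, i32 %old, i32 %new seq_cst seq_cst |
| ///   %prev = extractvalue { i32, i1 } %pair, 0 |
| /// while the bool-returning variant extracts field 1 and zero-extends it. |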
| static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
| bool ReturnBool) { |
| QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
| llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
| unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
| |
| llvm::IntegerType *IntType = llvm::IntegerType::get( |
| CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
| llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
| |
| Value *Args[3]; |
| Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
| Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Type *ValueType = Args[1]->getType(); |
| Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
| Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
| |
| Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
| Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
| llvm::AtomicOrdering::SequentiallyConsistent); |
| if (ReturnBool) |
| // Extract boolean success flag and zext it to int. |
| return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
| CGF.ConvertType(E->getType())); |
| else |
| // Extract old value and emit it using the same type as compare value. |
| return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
| ValueType); |
| } |
| |
| /// This function should be invoked to emit atomic cmpxchg for Microsoft's |
| /// _InterlockedCompareExchange* intrinsics which have the following signature: |
| /// T _InterlockedCompareExchange(T volatile *Destination, |
| /// T Exchange, |
| /// T Comparand); |
| /// |
| /// Whereas the llvm 'cmpxchg' instruction has the following syntax: |
| /// cmpxchg *Destination, Comparand, Exchange. |
| /// So we need to swap Comparand and Exchange when invoking |
| /// CreateAtomicCmpXchg. That is also why we cannot reuse the utility function |
| /// MakeAtomicCmpXchgValue above: it expects its arguments to already be in the |
| /// order that cmpxchg expects. |
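| /// |
| /// For instance (a rough sketch; value names are illustrative), |
| ///   _InterlockedCompareExchange(&Dest, Exch, Comp) |
| /// becomes |
| ///   %pair = cmpxchg volatile i32* %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst |
| ///   %old  = extractvalue { i32, i1 } %pair, 0   ; returned to the caller |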
| |
| static |
| Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
| AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
| assert(E->getArg(0)->getType()->isPointerType()); |
| assert(CGF.getContext().hasSameUnqualifiedType( |
| E->getType(), E->getArg(0)->getType()->getPointeeType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
| E->getArg(1)->getType())); |
| assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
| E->getArg(2)->getType())); |
| |
| auto *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
| auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); |
| auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| // For Release ordering, the failure ordering should be Monotonic. |
| auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
| AtomicOrdering::Monotonic : |
| SuccessOrdering; |
| |
| auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
| Destination, Comparand, Exchange, |
| SuccessOrdering, FailureOrdering); |
| Result->setVolatile(true); |
| return CGF.Builder.CreateExtractValue(Result, 0); |
| } |
| |
| static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
| AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
| assert(E->getArg(0)->getType()->isPointerType()); |
| |
| auto *IntTy = CGF.ConvertType(E->getType()); |
| auto *Result = CGF.Builder.CreateAtomicRMW( |
| AtomicRMWInst::Add, |
| CGF.EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| Ordering); |
| return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); |
| } |
| |
| static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, |
| AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
| assert(E->getArg(0)->getType()->isPointerType()); |
| |
| auto *IntTy = CGF.ConvertType(E->getType()); |
| auto *Result = CGF.Builder.CreateAtomicRMW( |
| AtomicRMWInst::Sub, |
| CGF.EmitScalarExpr(E->getArg(0)), |
| ConstantInt::get(IntTy, 1), |
| Ordering); |
| return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); |
| } |
| |
| // Build a plain volatile load. |
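| // For example, a call like __iso_volatile_load32(p) is built as roughly |
| //   %v = load volatile i32, i32* %p, align 4 |
| // (a sketch assuming a 4-byte element type; names are illustrative). |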
| static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
| QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
| CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy); |
| llvm::Type *ITy = |
| llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8); |
| Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
| llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize); |
| Load->setVolatile(true); |
| return Load; |
| } |
| |
| // Build a plain volatile store. |
| static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
| Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
| Value *Value = CGF.EmitScalarExpr(E->getArg(1)); |
| QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
| CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy); |
| llvm::Type *ITy = |
| llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8); |
| Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
| llvm::StoreInst *Store = |
| CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize); |
| Store->setVolatile(true); |
| return Store; |
| } |
| |
| // Emit a simple mangled intrinsic that has 1 argument and a return type |
| // matching the argument type. Depending on mode, this may be a constrained |
| // floating-point intrinsic. |
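| // For example, __builtin_sqrt(x) on a double becomes a call to llvm.sqrt.f64 |
| // in the default mode, or to llvm.experimental.constrained.sqrt.f64 (with the |
| // current rounding/exception-behavior metadata) when strict FP is in effect. |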
| static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, unsigned IntrinsicID, |
| unsigned ConstrainedIntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| if (CGF.Builder.getIsFPConstrained()) { |
| Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); |
| } else { |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, Src0); |
| } |
| } |
| |
| // Emit an intrinsic that has 2 operands of the same type as its result. |
| // Depending on mode, this may be a constrained floating-point intrinsic. |
| static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, unsigned IntrinsicID, |
| unsigned ConstrainedIntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| if (CGF.Builder.getIsFPConstrained()) { |
| Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); |
| } else { |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
| } |
| } |
| |
| // Emit an intrinsic that has 3 operands of the same type as its result. |
| // Depending on mode, this may be a constrained floating-point intrinsic. |
| static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, unsigned IntrinsicID, |
| unsigned ConstrainedIntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
| |
| if (CGF.Builder.getIsFPConstrained()) { |
| Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 }); |
| } else { |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
| } |
| } |
| |
| // Emit an intrinsic where all operands are of the same type as the result. |
| // Depending on mode, this may be a constrained floating-point intrinsic. |
| static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
| unsigned IntrinsicID, |
| unsigned ConstrainedIntrinsicID, |
| llvm::Type *Ty, |
| ArrayRef<Value *> Args) { |
| Function *F; |
| if (CGF.Builder.getIsFPConstrained()) |
| F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty); |
| else |
| F = CGF.CGM.getIntrinsic(IntrinsicID, Ty); |
| |
| if (CGF.Builder.getIsFPConstrained()) |
| return CGF.Builder.CreateConstrainedFPCall(F, Args); |
| else |
| return CGF.Builder.CreateCall(F, Args); |
| } |
| |
| // Emit a simple mangled intrinsic that has 1 argument and a return type |
| // matching the argument type. |
| static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, Src0); |
| } |
| |
| // Emit an intrinsic that has 2 operands of the same type as its result. |
| static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
| } |
| |
| // Emit an intrinsic that has 3 operands of the same type as its result. |
| static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
| |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
| } |
| |
| // Emit an intrinsic that has 1 float or double operand, and 1 integer. |
| static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
| const CallExpr *E, |
| unsigned IntrinsicID) { |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
| return CGF.Builder.CreateCall(F, {Src0, Src1}); |
| } |
| |
| // Emit an intrinsic that has overloaded integer result and fp operand. |
| static Value * |
| emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
| unsigned IntrinsicID, |
| unsigned ConstrainedIntrinsicID) { |
| llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
| llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
| |
| if (CGF.Builder.getIsFPConstrained()) { |
| Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, |
| {ResultType, Src0->getType()}); |
| return CGF.Builder.CreateConstrainedFPCall(F, {Src0}); |
| } else { |
| Function *F = |
| CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()}); |
| return CGF.Builder.CreateCall(F, Src0); |
| } |
| } |
| |
| /// EmitFAbs - Emit a call to @llvm.fabs(). |
| static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
| Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
| llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
| Call->setDoesNotAccessMemory(); |
| return Call; |
| } |
| |
| /// Emit the computation of the sign bit for a floating point value. Returns |
| /// the i1 sign bit value. |
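| /// |
| /// For a plain double, for example, this amounts to bitcasting the value to |
| /// i64 and comparing it (signed) against zero, which is true exactly when the |
| /// sign bit is set; ppc_fp128 needs the extra handling below. |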
| static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
| LLVMContext &C = CGF.CGM.getLLVMContext(); |
| |
| llvm::Type *Ty = V->getType(); |
| int Width = Ty->getPrimitiveSizeInBits(); |
| llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateBitCast(V, IntTy); |
| if (Ty->isPPC_FP128Ty()) { |
| // We want the sign bit of the higher-order double. The bitcast we just |
| // did works as if the double-double was stored to memory and then |
| // read as an i128. The "store" will put the higher-order double in the |
| // lower address in both little- and big-Endian modes, but the "load" |
| // will treat those bits as a different part of the i128: the low bits in |
| // little-Endian, the high bits in big-Endian. Therefore, on big-Endian |
| // we need to shift the high bits down to the low before truncating. |
| Width >>= 1; |
| if (CGF.getTarget().isBigEndian()) { |
| Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
| V = CGF.Builder.CreateLShr(V, ShiftCst); |
| } |
| // We are truncating the value in order to extract the higher-order |
| // double, from which we will extract the sign. |
| IntTy = llvm::IntegerType::get(C, Width); |
| V = CGF.Builder.CreateTrunc(V, IntTy); |
| } |
| Value *Zero = llvm::Constant::getNullValue(IntTy); |
| return CGF.Builder.CreateICmpSLT(V, Zero); |
| } |
| |
| static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
| const CallExpr *E, llvm::Constant *calleeValue) { |
| CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD)); |
| return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
| } |
| |
| /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
| /// depending on IntrinsicID. |
| /// |
| /// \arg CGF The current codegen function. |
| /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
| /// \arg X The first argument to the llvm.*.with.overflow.*. |
| /// \arg Y The second argument to the llvm.*.with.overflow.*. |
| /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
| /// \returns The result (i.e. sum/product) returned by the intrinsic. |
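| /// |
| /// As a rough sketch (value names are illustrative), for |
| /// __builtin_sadd_overflow this emits |
| ///   %res   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y) |
| ///   %carry = extractvalue { i32, i1 } %res, 1 |
| ///   %sum   = extractvalue { i32, i1 } %res, 0 |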
| static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
| const llvm::Intrinsic::ID IntrinsicID, |
| llvm::Value *X, llvm::Value *Y, |
| llvm::Value *&Carry) { |
| // Make sure we have integers of the same width. |
| assert(X->getType() == Y->getType() && |
| "Arguments must be the same type. (Did you forget to make sure both " |
| "arguments have the same integer width?)"); |
| |
| Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
| llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
| Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
| return CGF.Builder.CreateExtractValue(Tmp, 0); |
| } |
| |
| static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
| unsigned IntrinsicID, |
| int low, int high) { |
| llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
| llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
| Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
| llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
| Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
| return Call; |
| } |
| |
| namespace { |
| struct WidthAndSignedness { |
| unsigned Width; |
| bool Signed; |
| }; |
| } |
| |
| static WidthAndSignedness |
| getIntegerWidthAndSignedness(const clang::ASTContext &context, |
| const clang::QualType Type) { |
| assert(Type->isIntegerType() && "Given type is not an integer."); |
| unsigned Width = Type->isBooleanType() ? 1 |
| : Type->isExtIntType() ? context.getIntWidth(Type) |
| : context.getTypeInfo(Type).Width; |
| bool Signed = Type->isSignedIntegerType(); |
| return {Width, Signed}; |
| } |
| |
| // Given one or more integer types, this function produces an integer type that |
| // encompasses them: any value in one of the given types could be expressed in |
| // the encompassing type. |
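| // |
| // For example, given a signed 32-bit type and an unsigned 32-bit type, the |
| // encompassing type is a signed 33-bit integer: it must be signed because one |
| // of the inputs is signed, and it needs one extra bit so that every unsigned |
| // 32-bit value still fits. |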
| static struct WidthAndSignedness |
| EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
| assert(Types.size() > 0 && "Empty list of types."); |
| |
| // If any of the given types is signed, we must return a signed type. |
| bool Signed = false; |
| for (const auto &Type : Types) { |
| Signed |= Type.Signed; |
| } |
| |
| // The encompassing type must have a width greater than or equal to the width |
| // of the specified types. Additionally, if the encompassing type is signed, |
| // its width must be strictly greater than the width of any unsigned types |
| // given. |
| unsigned Width = 0; |
| for (const auto &Type : Types) { |
| unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
| if (Width < MinWidth) { |
| Width = MinWidth; |
| } |
| } |
| |
| return {Width, Signed}; |
| } |
| |
| Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
| llvm::Type *DestType = Int8PtrTy; |
| if (ArgValue->getType() != DestType) |
| ArgValue = |
| Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
| |
| Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
| return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
| } |
| |
| /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
| /// __builtin_object_size(p, @p To) is correct |
| static bool areBOSTypesCompatible(int From, int To) { |
| // Note: Our __builtin_object_size implementation currently treats Type=0 and |
| // Type=2 identically. Encoding this implementation detail here may make |
| // improving __builtin_object_size difficult in the future, so it's omitted. |
| return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
| } |
| |
| static llvm::Value * |
| getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
| return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
| } |
| |
| llvm::Value * |
| CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType, |
| llvm::Value *EmittedE, |
| bool IsDynamic) { |
| uint64_t ObjectSize; |
| if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
| return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
| return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
| } |
| |
| /// Returns a Value corresponding to the size of the given expression. |
| /// This Value may be either of the following: |
| /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
| /// it) |
| /// - A call to the @llvm.objectsize intrinsic |
| /// |
| /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
| /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
| /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
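| /// |
| /// As a sketch, __builtin_object_size(p, 0) on a plain pointer (no |
| /// pass_object_size attribute) becomes roughly |
| ///   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true, |
| ///                                              i1 false) |
| /// where the i1 flags are Min, NullIsUnknown and Dynamic, respectively. |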
| llvm::Value * |
| CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
| llvm::IntegerType *ResType, |
| llvm::Value *EmittedE, bool IsDynamic) { |
| // We need to reference an argument if the pointer is a parameter with the |
| // pass_object_size attribute. |
| if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
| auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
| auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
| if (Param != nullptr && PS != nullptr && |
| areBOSTypesCompatible(PS->getType(), Type)) { |
| auto Iter = SizeArguments.find(Param); |
| assert(Iter != SizeArguments.end()); |
| |
| const ImplicitParamDecl *D = Iter->second; |
| auto DIter = LocalDeclMap.find(D); |
| assert(DIter != LocalDeclMap.end()); |
| |
| return EmitLoadOfScalar(DIter->second, /*Volatile=*/false, |
| getContext().getSizeType(), E->getBeginLoc()); |
| } |
| } |
| |
| // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
| // evaluate E for side-effects. In either case, we shouldn't lower to |
| // @llvm.objectsize. |
| if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
| return getDefaultBuiltinObjectSizeResult(Type, ResType); |
| |
| Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
| assert(Ptr->getType()->isPointerTy() && |
| "Non-pointer passed to __builtin_object_size?"); |
| |
| Function *F = |
| CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
| |
| // LLVM only supports 0 and 2; make sure that we pass that along as a boolean. |
| Value *Min = Builder.getInt1((Type & 2) != 0); |
| // For GCC compatibility, __builtin_object_size treats NULL as unknown size. |
| Value *NullIsUnknown = Builder.getTrue(); |
| Value *Dynamic = Builder.getInt1(IsDynamic); |
| return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}); |
| } |
| |
| namespace { |
| /// A struct to generically describe a bit test intrinsic. |
| struct BitTest { |
| enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
| enum InterlockingKind : uint8_t { |
| Unlocked, |
| Sequential, |
| Acquire, |
| Release, |
| NoFence |
| }; |
| |
| ActionKind Action; |
| InterlockingKind Interlocking; |
| bool Is64Bit; |
| |
| static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
| }; |
| } // namespace |
| |
| BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
| switch (BuiltinID) { |
| // Main portable variants. |
| case Builtin::BI_bittest: |
| return {TestOnly, Unlocked, false}; |
| case Builtin::BI_bittestandcomplement: |
| return {Complement, Unlocked, false}; |
| case Builtin::BI_bittestandreset: |
| return {Reset, Unlocked, false}; |
| case Builtin::BI_bittestandset: |
| return {Set, Unlocked, false}; |
| case Builtin::BI_interlockedbittestandreset: |
| return {Reset, Sequential, false}; |
| case Builtin::BI_interlockedbittestandset: |
| return {Set, Sequential, false}; |
| |
| // X86-specific 64-bit variants. |
| case Builtin::BI_bittest64: |
| return {TestOnly, Unlocked, true}; |
| case Builtin::BI_bittestandcomplement64: |
| return {Complement, Unlocked, true}; |
| case Builtin::BI_bittestandreset64: |
| return {Reset, Unlocked, true}; |
| case Builtin::BI_bittestandset64: |
| return {Set, Unlocked, true}; |
| case Builtin::BI_interlockedbittestandreset64: |
| return {Reset, Sequential, true}; |
| case Builtin::BI_interlockedbittestandset64: |
| return {Set, Sequential, true}; |
| |
| // ARM/AArch64-specific ordering variants. |
| case Builtin::BI_interlockedbittestandset_acq: |
| return {Set, Acquire, false}; |
| case Builtin::BI_interlockedbittestandset_rel: |
| return {Set, Release, false}; |
| case Builtin::BI_interlockedbittestandset_nf: |
| return {Set, NoFence, false}; |
| case Builtin::BI_interlockedbittestandreset_acq: |
| return {Reset, Acquire, false}; |
| case Builtin::BI_interlockedbittestandreset_rel: |
| return {Reset, Release, false}; |
| case Builtin::BI_interlockedbittestandreset_nf: |
| return {Reset, NoFence, false}; |
| } |
| llvm_unreachable("expected only bittest intrinsics"); |
| } |
| |
| static char bitActionToX86BTCode(BitTest::ActionKind A) { |
| switch (A) { |
| case BitTest::TestOnly: return '\0'; |
| case BitTest::Complement: return 'c'; |
| case BitTest::Reset: return 'r'; |
| case BitTest::Set: return 's'; |
| } |
| llvm_unreachable("invalid action"); |
| } |
| |
| static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
| BitTest BT, |
| const CallExpr *E, Value *BitBase, |
| Value *BitPos) { |
| char Action = bitActionToX86BTCode(BT.Action); |
| char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
| |
| // Build the assembly. |
| SmallString<64> Asm; |
| raw_svector_ostream AsmOS(Asm); |
| if (BT.Interlocking != BitTest::Unlocked) |
| AsmOS << "lock "; |
| AsmOS << "bt"; |
| if (Action) |
| AsmOS << Action; |
| AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}"; |
| |
| // Build the constraints. FIXME: We should support immediates when possible. |
| std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}"; |
| llvm::IntegerType *IntType = llvm::IntegerType::get( |
| CGF.getLLVMContext(), |
| CGF.getContext().getTypeSize(E->getArg(1)->getType())); |
| llvm::Type *IntPtrType = IntType->getPointerTo(); |
| llvm::FunctionType *FTy = |
| llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); |
| |
| llvm::InlineAsm *IA = |
| llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
| return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); |
| } |
| |
| static llvm::AtomicOrdering |
| getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
| switch (I) { |
| case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
| case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
| case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
| case BitTest::Release: return llvm::AtomicOrdering::Release; |
| case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
| } |
| llvm_unreachable("invalid interlocking"); |
| } |
| |
| /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of |
| /// bits and a bit position and read and optionally modify the bit at that |
| /// position. The position index can be arbitrarily large, i.e. it can be larger |
| /// than 31 or 63, so we need an indexed load in the general case. |
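| /// |
| /// For example, on a non-x86 target _bittestandset(bits, 40) loads the byte |
| /// at index 40 >> 3 == 5, ORs in the mask 1 << (40 & 7) == 1, stores the byte |
| /// back, and returns the originally loaded bit (a rough sketch of the generic |
| /// path below). |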
| static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
| unsigned BuiltinID, |
| const CallExpr *E) { |
| Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); |
| Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); |
| |
| BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
| |
| // X86 has special BT, BTC, BTR, and BTS instructions that handle the array |
| // indexing operation internally. Use them if possible. |
| if (CGF.getTarget().getTriple().isX86()) |
| return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
| |
| // Otherwise, use generic code to load one byte and test the bit. Use all but |
| // the bottom three bits as the array index, and the bottom three bits to form |
| // a mask. |
| // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; |
| Value *ByteIndex = CGF.Builder.CreateAShr( |
| BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); |
| Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); |
| Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, |
| ByteIndex, "bittest.byteaddr"), |
| CharUnits::One()); |
| Value *PosLow = |
| CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), |
| llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); |
| |
| // The updating instructions will need a mask. |
| Value *Mask = nullptr; |
| if (BT.Action != BitTest::TestOnly) { |
| Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, |
| "bittest.mask"); |
| } |
| |
| // Check the action and ordering of the interlocked intrinsics. |
| llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); |
| |
| Value *OldByte = nullptr; |
| if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
| // Emit a combined atomicrmw load/store operation for the interlocked |
| // intrinsics. |
| llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
| if (BT.Action == BitTest::Reset) { |
| Mask = CGF.Builder.CreateNot(Mask); |
| RMWOp = llvm::AtomicRMWInst::And; |
| } |
| OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, |
| Ordering); |
| } else { |
| // Emit a plain load for the non-interlocked intrinsics. |
| OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); |
| Value *NewByte = nullptr; |
| switch (BT.Action) { |
| case BitTest::TestOnly: |
| // Don't store anything. |
| break; |
| case BitTest::Complement: |
| NewByte = CGF.Builder.CreateXor(OldByte, Mask); |
| break; |
| case BitTest::Reset: |
| NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); |
| break; |
| case BitTest::Set: |
| NewByte = CGF.Builder.CreateOr(OldByte, Mask); |
| break; |
| } |
| if (NewByte) |
| CGF.Builder.CreateStore(NewByte, ByteAddr); |
| } |
| |
| // However we loaded the old byte, either by plain load or atomicrmw, shift |
| // the bit into the low position and mask it to 0 or 1. |
| Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); |
| return CGF.Builder.CreateAnd( |
| ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); |
| } |
| |
| namespace { |
| enum class MSVCSetJmpKind { |
| _setjmpex, |
| _setjmp3, |
| _setjmp |
| }; |
| } |
| |
| /// MSVC handles setjmp a bit differently on different platforms. On every |
| /// architecture except 32-bit x86, the frame address is passed. On x86, extra |
| /// parameters can be passed as variadic arguments, but we always pass none. |
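| /// |
| /// For example, with MSVCSetJmpKind::_setjmp3 this emits a vararg call |
| /// "_setjmp3(buf, 0)"; for _setjmp/_setjmpex the second argument is instead |
| /// the result of llvm.sponentry on AArch64 or llvm.frameaddress(0) elsewhere |
| /// (a sketch of the code below). |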
| static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
| const CallExpr *E) { |
| llvm::Value *Arg1 = nullptr; |
| llvm::Type *Arg1Ty = nullptr; |
| StringRef Name; |
| bool IsVarArg = false; |
| if (SJKind == MSVCSetJmpKind::_setjmp3) { |
| Name = "_setjmp3"; |
| Arg1Ty = CGF.Int32Ty; |
| Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); |
| IsVarArg = true; |
| } else { |
| Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; |
| Arg1Ty = CGF.Int8PtrTy; |
| if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
| Arg1 = CGF.Builder.CreateCall( |
| CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy)); |
| } else |
| Arg1 = CGF.Builder.CreateCall( |
| CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy), |
| llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
| } |
| |
| // Mark the call site and declaration with ReturnsTwice. |
| llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
| llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
| CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, |
| llvm::Attribute::ReturnsTwice); |
| llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
| llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, |
| ReturnsTwiceAttr, /*Local=*/true); |
| |
| llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
| CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); |
| llvm::Value *Args[] = {Buf, Arg1}; |
| llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); |
| CB->setAttributes(ReturnsTwiceAttr); |
| return RValue::get(CB); |
| } |
| |
| // Many of the MSVC builtins are available on x64, ARM and AArch64; to avoid |
| // repeating code, we handle them here. |
| enum class CodeGenFunction::MSVCIntrin { |
| _BitScanForward, |
| _BitScanReverse, |
| _InterlockedAnd, |
| _InterlockedDecrement, |
| _InterlockedExchange, |
| _InterlockedExchangeAdd, |
| _InterlockedExchangeSub, |
| _InterlockedIncrement, |
| _InterlockedOr, |
| _InterlockedXor, |
| _InterlockedExchangeAdd_acq, |
| _InterlockedExchangeAdd_rel, |
| _InterlockedExchangeAdd_nf, |
| _InterlockedExchange_acq, |
| _InterlockedExchange_rel, |
| _InterlockedExchange_nf, |
| _InterlockedCompareExchange_acq, |
| _InterlockedCompareExchange_rel, |
| _InterlockedCompareExchange_nf, |
| _InterlockedOr_acq, |
| _InterlockedOr_rel, |
| _InterlockedOr_nf, |
| _InterlockedXor_acq, |
| _InterlockedXor_rel, |
| _InterlockedXor_nf, |
| _InterlockedAnd_acq, |
| _InterlockedAnd_rel, |
| _InterlockedAnd_nf, |
| _InterlockedIncrement_acq, |
| _InterlockedIncrement_rel, |
| _InterlockedIncrement_nf, |
| _InterlockedDecrement_acq, |
| _InterlockedDecrement_rel, |
| _InterlockedDecrement_nf, |
| __fastfail, |
| }; |
| |
| Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
| const CallExpr *E) { |
| switch (BuiltinID) { |
| case MSVCIntrin::_BitScanForward: |
| case MSVCIntrin::_BitScanReverse: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| llvm::Type *IndexType = |
| EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType(); |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| |
| Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
| Value *ResZero = llvm::Constant::getNullValue(ResultType); |
| Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
| |
| BasicBlock *Begin = Builder.GetInsertBlock(); |
| BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
| Builder.SetInsertPoint(End); |
| PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
| |
| Builder.SetInsertPoint(Begin); |
| Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
| BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
| Builder.CreateCondBr(IsZero, End, NotZero); |
| Result->addIncoming(ResZero, Begin); |
| |
| Builder.SetInsertPoint(NotZero); |
| Address IndexAddress = EmitPointerWithAlignment(E->getArg(0)); |
| |
| if (BuiltinID == MSVCIntrin::_BitScanForward) { |
| Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
| ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
| Builder.CreateStore(ZeroCount, IndexAddress, false); |
| } else { |
| unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
| Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
| |
| Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
| ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
| Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
| Builder.CreateStore(Index, IndexAddress, false); |
| } |
| Builder.CreateBr(End); |
| Result->addIncoming(ResOne, NotZero); |
| |
| Builder.SetInsertPoint(End); |
| return Result; |
| } |
| case MSVCIntrin::_InterlockedAnd: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
| case MSVCIntrin::_InterlockedExchange: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
| case MSVCIntrin::_InterlockedExchangeAdd: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
| case MSVCIntrin::_InterlockedExchangeSub: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
| case MSVCIntrin::_InterlockedOr: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
| case MSVCIntrin::_InterlockedXor: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
| case MSVCIntrin::_InterlockedExchangeAdd_acq: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
| AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedExchangeAdd_rel: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
| AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedExchangeAdd_nf: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
| AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedExchange_acq: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
| AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedExchange_rel: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
| AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedExchange_nf: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
| AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedCompareExchange_acq: |
| return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedCompareExchange_rel: |
| return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedCompareExchange_nf: |
| return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedOr_acq: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
| AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedOr_rel: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
| AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedOr_nf: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
| AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedXor_acq: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
| AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedXor_rel: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
| AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedXor_nf: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
| AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedAnd_acq: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
| AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedAnd_rel: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
| AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedAnd_nf: |
| return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
| AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedIncrement_acq: |
| return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedIncrement_rel: |
| return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedIncrement_nf: |
| return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic); |
| case MSVCIntrin::_InterlockedDecrement_acq: |
| return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire); |
| case MSVCIntrin::_InterlockedDecrement_rel: |
| return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release); |
| case MSVCIntrin::_InterlockedDecrement_nf: |
| return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic); |
| |
| case MSVCIntrin::_InterlockedDecrement: |
| return EmitAtomicDecrementValue(*this, E); |
| case MSVCIntrin::_InterlockedIncrement: |
| return EmitAtomicIncrementValue(*this, E); |
| |
| case MSVCIntrin::__fastfail: { |
| // Request immediate process termination from the kernel. The instruction |
| // sequences to do this are documented on MSDN: |
| // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
| llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
| StringRef Asm, Constraints; |
| switch (ISA) { |
| default: |
| ErrorUnsupported(E, "__fastfail call for this architecture"); |
| break; |
| case llvm::Triple::x86: |
| case llvm::Triple::x86_64: |
| Asm = "int $$0x29"; |
| Constraints = "{cx}"; |
| break; |
| case llvm::Triple::thumb: |
| Asm = "udf #251"; |
| Constraints = "{r0}"; |
| break; |
| case llvm::Triple::aarch64: |
| Asm = "brk #0xF003"; |
| Constraints = "{w0}"; |
| } |
| llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
| llvm::InlineAsm *IA = |
| llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
| llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
| getLLVMContext(), llvm::AttributeList::FunctionIndex, |
| llvm::Attribute::NoReturn); |
| llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
| CI->setAttributes(NoReturnAttr); |
| return CI; |
| } |
| } |
| llvm_unreachable("Incorrect MSVC intrinsic!"); |
| } |
| |
| namespace { |
| // ARC cleanup for __builtin_os_log_format |
| struct CallObjCArcUse final : EHScopeStack::Cleanup { |
| CallObjCArcUse(llvm::Value *object) : object(object) {} |
| llvm::Value *object; |
| |
| void Emit(CodeGenFunction &CGF, Flags flags) override { |
| CGF.EmitARCIntrinsicUse(object); |
| } |
| }; |
| } |
| |
| Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
| BuiltinCheckKind Kind) { |
| assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) |
| && "Unsupported builtin check kind"); |
| |
| Value *ArgValue = EmitScalarExpr(E); |
| if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
| return ArgValue; |
| |
| SanitizerScope SanScope(this); |
| Value *Cond = Builder.CreateICmpNE( |
| ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
| EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
| SanitizerHandler::InvalidBuiltin, |
| {EmitCheckSourceLocation(E->getExprLoc()), |
| llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
| None); |
| return ArgValue; |
| } |
| |
| /// Get the argument type for arguments to os_log_helper. |
| static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
| QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); |
| return C.getCanonicalType(UnsignedTy); |
| } |
| |
| llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
| const analyze_os_log::OSLogBufferLayout &Layout, |
| CharUnits BufferAlignment) { |
| ASTContext &Ctx = getContext(); |
| |
| llvm::SmallString<64> Name; |
| { |
| raw_svector_ostream OS(Name); |
| OS << "__os_log_helper"; |
| OS << "_" << BufferAlignment.getQuantity(); |
| OS << "_" << int(Layout.getSummaryByte()); |
| OS << "_" << int(Layout.getNumArgsByte()); |
| for (const auto &Item : Layout.Items) |
| OS << "_" << int(Item.getSizeByte()) << "_" |
| << int(Item.getDescriptorByte()); |
| } |
| |
| if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
| return F; |
| |
| llvm::SmallVector<QualType, 4> ArgTys; |
| FunctionArgList Args; |
| Args.push_back(ImplicitParamDecl::Create( |
| Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy, |
| ImplicitParamDecl::Other)); |
| ArgTys.emplace_back(Ctx.VoidPtrTy); |
| |
| for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
| char Size = Layout.Items[I].getSizeByte(); |
| if (!Size) |
| continue; |
| |
| QualType ArgTy = getOSLogArgType(Ctx, Size); |
| Args.push_back(ImplicitParamDecl::Create( |
| Ctx, nullptr, SourceLocation(), |
| &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy, |
| ImplicitParamDecl::Other)); |
| ArgTys.emplace_back(ArgTy); |
| } |
| |
| QualType ReturnTy = Ctx.VoidTy; |
| QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {}); |
| |
| // The helper function has linkonce_odr linkage to enable the linker to merge |
| // identical functions. To ensure the merging always happens, 'noinline' is |
| // attached to the function when compiling with -Oz. |
| const CGFunctionInfo &FI = |
| CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args); |
| llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
| llvm::Function *Fn = llvm::Function::Create( |
| FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
| Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
| CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn); |
| CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
| Fn->setDoesNotThrow(); |
| |
| // Attach 'noinline' at -Oz. |
| if (CGM.getCodeGenOpts().OptimizeSize == 2) |
| Fn->addFnAttr(llvm::Attribute::NoInline); |
| |
| auto NL = ApplyDebugLocation::CreateEmpty(*this); |
| IdentifierInfo *II = &Ctx.Idents.get(Name); |
| FunctionDecl *FD = FunctionDecl::Create( |
| Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
| FunctionTy, nullptr, SC_PrivateExtern, false, false); |
| // Avoid generating debug location info for the function. |
| FD->setImplicit(); |
| |
| StartFunction(FD, ReturnTy, Fn, FI, Args); |
| |
| // Create a scope with an artificial location for the body of this function. |
| auto AL = ApplyDebugLocation::CreateArtificial(*this); |
| |
| CharUnits Offset; |
| Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), |
| BufferAlignment); |
| Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
| Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
| |
| unsigned I = 1; |
| for (const auto &Item : Layout.Items) { |
| Builder.CreateStore( |
| Builder.getInt8(Item.getDescriptorByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
| Builder.CreateStore( |
| Builder.getInt8(Item.getSizeByte()), |
| Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
| |
| CharUnits Size = Item.size(); |
| if (!Size.getQuantity()) |
| continue; |
| |
| Address Arg = GetAddrOfLocalVar(Args[I]); |
| Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
| Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), |
| "argDataCast"); |
| Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
| Offset += Size; |
| ++I; |
| } |
| |
| FinishFunction(); |
| |
| return Fn; |
| } |
| |
| RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
| assert(E.getNumArgs() >= 2 && |
| "__builtin_os_log_format takes at least 2 arguments"); |
| ASTContext &Ctx = getContext(); |
| analyze_os_log::OSLogBufferLayout Layout; |
| analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
| Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
| llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
| |
| // Ignore argument 1, the format string. It is not currently used. |
| CallArgList Args; |
| Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
| |
| for (const auto &Item : Layout.Items) { |
| int Size = Item.getSizeByte(); |
| if (!Size) |
| continue; |
| |
| llvm::Value *ArgVal; |
| |
| if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
| uint64_t Val = 0; |
| for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
| Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
| ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val)); |
| } else if (const Expr *TheExpr = Item.getExpr()) { |
| ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); |
| |
| // If a temporary object that requires destruction after the full |
| // expression is passed, push a lifetime-extended cleanup to extend its |
| // lifetime to the end of the enclosing block scope. |
| auto LifetimeExtendObject = [&](const Expr *E) { |
| E = E->IgnoreParenCasts(); |
| // Extend lifetimes of objects returned by function calls and message |
| // sends. |
| |
| // FIXME: We should do this in other cases in which temporaries are |
| // created including arguments of non-ARC types (e.g., C++ |
| // temporaries). |
| if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E)) |
| return true; |
| return false; |
| }; |
| |
| if (TheExpr->getType()->isObjCRetainableType() && |
| getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) { |
| assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && |
| "Only scalar can be a ObjC retainable type"); |
| if (!isa<Constant>(ArgVal)) { |
| CleanupKind Cleanup = getARCCleanupKind(); |
| QualType Ty = TheExpr->getType(); |
| Address Alloca = Address::invalid(); |
| Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); |
| ArgVal = EmitARCRetain(Ty, ArgVal); |
| Builder.CreateStore(ArgVal, Addr); |
| pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty, |
| CodeGenFunction::destroyARCStrongPrecise, |
| Cleanup & EHCleanup); |
| |
| // Push a clang.arc.use call to ensure ARC optimizer knows that the |
| // argument has to be alive. |
| if (CGM.getCodeGenOpts().OptimizationLevel != 0) |
| pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal); |
| } |
| } |
| } else { |
| ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
| } |
| |
| unsigned ArgValSize = |
| CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
| llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
| ArgValSize); |
| ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
| CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
| // If ArgVal has type x86_fp80, zero-extend ArgVal. |
| ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
| Args.add(RValue::get(ArgVal), ArgTy); |
| } |
| |
| const CGFunctionInfo &FI = |
| CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
| llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
| Layout, BufAddr.getAlignment()); |
| EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
| return RValue::get(BufAddr.getPointer()); |
| } |
| |
| /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
| static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
| WidthAndSignedness Op1Info, |
| WidthAndSignedness Op2Info, |
| WidthAndSignedness ResultInfo) { |
| return BuiltinID == Builtin::BI__builtin_mul_overflow && |
| std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width && |
| Op1Info.Signed != Op2Info.Signed; |
| } |
| |
| /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
| /// the generic checked-binop irgen. |
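| /// |
| /// As a rough sketch, for __builtin_mul_overflow(int a, unsigned b, int *res) |
| /// this takes |a|, performs llvm.umul.with.overflow.i32 on |a| and b, flags |
| /// overflow if the unsigned product exceeds INT_MAX + (a < 0), and negates the |
| /// product again when a was negative before it is stored through res. |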
| static RValue |
| EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
| WidthAndSignedness Op1Info, const clang::Expr *Op2, |
| WidthAndSignedness Op2Info, |
| const clang::Expr *ResultArg, QualType ResultQTy, |
| WidthAndSignedness ResultInfo) { |
| assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info, |
| Op2Info, ResultInfo) && |
| "Not a mixed-sign multipliction we can specialize"); |
| |
| // Emit the signed and unsigned operands. |
| const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
| const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
| llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
| llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
| unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
| unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
| |
| // One of the operands may be smaller than the other. If so, [s|z]ext it. |
| if (SignedOpWidth < UnsignedOpWidth) |
| Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); |
| if (UnsignedOpWidth < SignedOpWidth) |
| Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); |
| |
| llvm::Type *OpTy = Signed->getType(); |
| llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
| Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
| llvm::Type *ResTy = ResultPtr.getElementType(); |
| unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width); |
| |
| // Take the absolute value of the signed operand. |
| llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
| llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
| llvm::Value *AbsSigned = |
| CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
| |
| // Perform a checked unsigned multiplication. |
| llvm::Value *UnsignedOverflow; |
| llvm::Value *UnsignedResult = |
| EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
| Unsigned, UnsignedOverflow); |
| |
| llvm::Value *Overflow, *Result; |
| if (ResultInfo.Signed) { |
| // Signed overflow occurs if the result is greater than INT_MAX or less |
| // than INT_MIN, i.e., when |Result| > (INT_MAX + IsNegative). |
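| // For example (illustrative only), with a 32-bit signed result the limit on |
| // |Result| is 0x7fffffff for a non-negative product and 0x80000000 for a |
| // negative one, since INT_MIN has one extra unit of magnitude. |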
| auto IntMax = |
| llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth); |
| llvm::Value *MaxResult = |
| CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
| CGF.Builder.CreateZExt(IsNegative, OpTy)); |
| llvm::Value *SignedOverflow = |
| CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
| Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
| |
| // Prepare the signed result (possibly by negating it). |
| llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
| llvm::Value *SignedResult = |
| CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
| Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
| } else { |
| // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
| llvm::Value *Underflow = CGF.Builder.CreateAnd( |
| IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
| Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
| if (ResultInfo.Width < OpWidth) { |
| auto IntMax = |
| llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); |
| llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
| UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
| Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
| } |
| |
| // Negate the product if it would be negative in infinite precision. |
| Result = CGF.Builder.CreateSelect( |
| IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
| |
| Result = CGF.Builder.CreateTrunc(Result, ResTy); |
| } |
| assert(Overflow && Result && "Missing overflow or result"); |
| |
| bool isVolatile = |
| ResultArg->getType()->getPointeeType().isVolatileQualified(); |
| CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
| isVolatile); |
| return RValue::get(Overflow); |
| } |
| |
| static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, |
| Value *&RecordPtr, CharUnits Align, |
| llvm::FunctionCallee Func, int Lvl) { |
| ASTContext &Context = CGF.getContext(); |
| RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition(); |
| std::string Pad = std::string(Lvl * 4, ' '); |
| |
| Value *GString = |
| CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n"); |
| Value *Res = CGF.Builder.CreateCall(Func, {GString}); |
| |
| static llvm::DenseMap<QualType, const char *> Types; |
| if (Types.empty()) { |
| Types[Context.CharTy] = "%c"; |
| Types[Context.BoolTy] = "%d"; |
| Types[Context.SignedCharTy] = "%hhd"; |
| Types[Context.UnsignedCharTy] = "%hhu"; |
| Types[Context.IntTy] = "%d"; |
| Types[Context.UnsignedIntTy] = "%u"; |
| Types[Context.LongTy] = "%ld"; |
| Types[Context.UnsignedLongTy] = "%lu"; |
| Types[Context.LongLongTy] = "%lld"; |
| Types[Context.UnsignedLongLongTy] = "%llu"; |
| Types[Context.ShortTy] = "%hd"; |
| Types[Context.UnsignedShortTy] = "%hu"; |
| Types[Context.VoidPtrTy] = "%p"; |
| Types[Context.FloatTy] = "%f"; |
| Types[Context.DoubleTy] = "%f"; |
| Types[Context.LongDoubleTy] = "%Lf"; |
| Types[Context.getPointerType(Context.CharTy)] = "%s"; |
| Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; |
| } |
| |
| for (const auto *FD : RD->fields()) { |
| Value *FieldPtr = RecordPtr; |
| if (RD->isUnion()) |
| FieldPtr = CGF.Builder.CreatePointerCast( |
| FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType()))); |
| else |
| FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr, |
| FD->getFieldIndex()); |
| |
| GString = CGF.Builder.CreateGlobalStringPtr( |
| llvm::Twine(Pad) |
| .concat(FD->getType().getAsString()) |
| .concat(llvm::Twine(' ')) |
| .concat(FD->getNameAsString()) |
| .concat(" : ") |
| .str()); |
| Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
| Res = CGF.Builder.CreateAdd(Res, TmpRes); |
| |
| QualType CanonicalType = |
| FD->getType().getUnqualifiedType().getCanonicalType(); |
| |
| // If the field is itself a record type, recurse and dump it at the next |
| // indentation level. |
| if (CanonicalType->isRecordType()) { |
| TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1); |
| Res = CGF.Builder.CreateAdd(TmpRes, Res); |
| continue; |
| } |
| |
| // Pick the best printf format for the current field; unknown types fall |
| // back to the void-pointer format ("%p"). |
| llvm::Twine Format = Types.find(CanonicalType) == Types.end() |
| ? Types[Context.VoidPtrTy] |
| : Types[CanonicalType]; |
| |
| Address FieldAddress = Address(FieldPtr, Align); |
| FieldPtr = CGF.Builder.CreateLoad(FieldAddress); |
| |
| // FIXME: Need to handle bitfields here. |
| GString = CGF.Builder.CreateGlobalStringPtr( |
| Format.concat(llvm::Twine('\n')).str()); |
| TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr}); |
| Res = CGF.Builder.CreateAdd(Res, TmpRes); |
| } |
| |
| GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); |
| Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
| Res = CGF.Builder.CreateAdd(Res, TmpRes); |
| return Res; |
| } |
| |
| static bool |
| TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
| llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
| if (const auto *Arr = Ctx.getAsArrayType(Ty)) |
| Ty = Ctx.getBaseElementType(Arr); |
| |
| const auto *Record = Ty->getAsCXXRecordDecl(); |
| if (!Record) |
| return false; |
| |
| // We've already checked this type, or are in the process of checking it. |
| if (!Seen.insert(Record).second) |
| return false; |
| |
| assert(Record->hasDefinition() && |
| "Incomplete types should already be diagnosed"); |
| |
| if (Record->isDynamicClass()) |
| return true; |
| |
| for (FieldDecl *F : Record->fields()) { |
| if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) |
| return true; |
| } |
| return false; |
| } |
| |
| /// Determine if the specified type requires laundering by checking if it is a |
| /// dynamic class type or contains a subobject which is a dynamic class type. |
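| /// For example (illustrative only), under -fstrict-vtable-pointers a class |
| /// with virtual functions or virtual bases, or any type containing a field |
| /// (possibly within an array) of such a class type, requires laundering. |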
| static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
| if (!CGM.getCodeGenOpts().StrictVTablePointers) |
| return false; |
| llvm::SmallPtrSet<const Decl *, 16> Seen; |
| return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen); |
| } |
| |
| RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
| llvm::Value *Src = EmitScalarExpr(E->getArg(0)); |
| llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); |
| |
| // The builtin's shift arg may have a different type than the source arg and |
| // result, but the LLVM intrinsic uses the same type for all values. |
| llvm::Type *Ty = Src->getType(); |
| ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false); |
| |
| // Rotate is a special case of an LLVM funnel shift: the first two |
| // arguments are the same. |
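| // For example (illustrative only), a 32-bit rotate-left of X by S lowers to |
| // a call to @llvm.fshl.i32(X, X, S). |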
| unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
| Function *F = CGM.getIntrinsic(IID, Ty); |
| return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); |
| } |
| |
| RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
| const CallExpr *E, |
| ReturnValueSlot ReturnValue) { |
| const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
| // See if we can constant fold this builtin. If so, don't emit it at all. |
| Expr::EvalResult Result; |
| if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
| !Result.hasSideEffects()) { |
| if (Result.Val.isInt()) |
| return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
| Result.Val.getInt())); |
| if (Result.Val.isFloat()) |
| return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
| Result.Val.getFloat())); |
| } |
| |
| // There are LLVM math intrinsics/instructions corresponding to math library |
| // functions, except that the LLVM op will never set errno while the math |
| // library might. Math builtins also have the same semantics as their math |
| // library twins. Thus, we can transform math library and builtin calls into |
| // their LLVM counterparts if the call is marked 'const' (known never to set |
| // errno). |
| if (FD->hasAttr<ConstAttr>()) { |
| switch (BuiltinID) { |
| case Builtin::BIceil: |
| case Builtin::BIceilf: |
| case Builtin::BIceill: |
| case Builtin::BI__builtin_ceil: |
| case Builtin::BI__builtin_ceilf: |
| case Builtin::BI__builtin_ceilf16: |
| case Builtin::BI__builtin_ceill: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::ceil, |
| Intrinsic::experimental_constrained_ceil)); |
| |
| case Builtin::BIcopysign: |
| case Builtin::BIcopysignf: |
| case Builtin::BIcopysignl: |
| case Builtin::BI__builtin_copysign: |
| case Builtin::BI__builtin_copysignf: |
| case Builtin::BI__builtin_copysignf16: |
| case Builtin::BI__builtin_copysignl: |
| case Builtin::BI__builtin_copysignf128: |
| return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
| |
| case Builtin::BIcos: |
| case Builtin::BIcosf: |
| case Builtin::BIcosl: |
| case Builtin::BI__builtin_cos: |
| case Builtin::BI__builtin_cosf: |
| case Builtin::BI__builtin_cosf16: |
| case Builtin::BI__builtin_cosl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::cos, |
| Intrinsic::experimental_constrained_cos)); |
| |
| case Builtin::BIexp: |
| case Builtin::BIexpf: |
| case Builtin::BIexpl: |
| case Builtin::BI__builtin_exp: |
| case Builtin::BI__builtin_expf: |
| case Builtin::BI__builtin_expf16: |
| case Builtin::BI__builtin_expl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::exp, |
| Intrinsic::experimental_constrained_exp)); |
| |
| case Builtin::BIexp2: |
| case Builtin::BIexp2f: |
| case Builtin::BIexp2l: |
| case Builtin::BI__builtin_exp2: |
| case Builtin::BI__builtin_exp2f: |
| case Builtin::BI__builtin_exp2f16: |
| case Builtin::BI__builtin_exp2l: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::exp2, |
| Intrinsic::experimental_constrained_exp2)); |
| |
| case Builtin::BIfabs: |
| case Builtin::BIfabsf: |
| case Builtin::BIfabsl: |
| case Builtin::BI__builtin_fabs: |
| case Builtin::BI__builtin_fabsf: |
| case Builtin::BI__builtin_fabsf16: |
| case Builtin::BI__builtin_fabsl: |
| case Builtin::BI__builtin_fabsf128: |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
| |
| case Builtin::BIfloor: |
| case Builtin::BIfloorf: |
| case Builtin::BIfloorl: |
| case Builtin::BI__builtin_floor: |
| case Builtin::BI__builtin_floorf: |
| case Builtin::BI__builtin_floorf16: |
| case Builtin::BI__builtin_floorl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::floor, |
| Intrinsic::experimental_constrained_floor)); |
| |
| case Builtin::BIfma: |
| case Builtin::BIfmaf: |
| case Builtin::BIfmal: |
| case Builtin::BI__builtin_fma: |
| case Builtin::BI__builtin_fmaf: |
| case Builtin::BI__builtin_fmaf16: |
| case Builtin::BI__builtin_fmal: |
| return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::fma, |
| Intrinsic::experimental_constrained_fma)); |
| |
| case Builtin::BIfmax: |
| case Builtin::BIfmaxf: |
| case Builtin::BIfmaxl: |
| case Builtin::BI__builtin_fmax: |
| case Builtin::BI__builtin_fmaxf: |
| case Builtin::BI__builtin_fmaxf16: |
| case Builtin::BI__builtin_fmaxl: |
| return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::maxnum, |
| Intrinsic::experimental_constrained_maxnum)); |
| |
| case Builtin::BIfmin: |
| case Builtin::BIfminf: |
| case Builtin::BIfminl: |
| case Builtin::BI__builtin_fmin: |
| case Builtin::BI__builtin_fminf: |
| case Builtin::BI__builtin_fminf16: |
| case Builtin::BI__builtin_fminl: |
| return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::minnum, |
| Intrinsic::experimental_constrained_minnum)); |
| |
| // fmod() is a special case: it maps to the frem instruction rather than an |
| // LLVM intrinsic. |
| case Builtin::BIfmod: |
| case Builtin::BIfmodf: |
| case Builtin::BIfmodl: |
| case Builtin::BI__builtin_fmod: |
| case Builtin::BI__builtin_fmodf: |
| case Builtin::BI__builtin_fmodf16: |
| case Builtin::BI__builtin_fmodl: { |
| Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
| Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
| return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
| } |
| |
| case Builtin::BIlog: |
| case Builtin::BIlogf: |
| case Builtin::BIlogl: |
| case Builtin::BI__builtin_log: |
| case Builtin::BI__builtin_logf: |
| case Builtin::BI__builtin_logf16: |
| case Builtin::BI__builtin_logl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::log, |
| Intrinsic::experimental_constrained_log)); |
| |
| case Builtin::BIlog10: |
| case Builtin::BIlog10f: |
| case Builtin::BIlog10l: |
| case Builtin::BI__builtin_log10: |
| case Builtin::BI__builtin_log10f: |
| case Builtin::BI__builtin_log10f16: |
| case Builtin::BI__builtin_log10l: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::log10, |
| Intrinsic::experimental_constrained_log10)); |
| |
| case Builtin::BIlog2: |
| case Builtin::BIlog2f: |
| case Builtin::BIlog2l: |
| case Builtin::BI__builtin_log2: |
| case Builtin::BI__builtin_log2f: |
| case Builtin::BI__builtin_log2f16: |
| case Builtin::BI__builtin_log2l: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::log2, |
| Intrinsic::experimental_constrained_log2)); |
| |
| case Builtin::BInearbyint: |
| case Builtin::BInearbyintf: |
| case Builtin::BInearbyintl: |
| case Builtin::BI__builtin_nearbyint: |
| case Builtin::BI__builtin_nearbyintf: |
| case Builtin::BI__builtin_nearbyintl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::nearbyint, |
| Intrinsic::experimental_constrained_nearbyint)); |
| |
| case Builtin::BIpow: |
| case Builtin::BIpowf: |
| case Builtin::BIpowl: |
| case Builtin::BI__builtin_pow: |
| case Builtin::BI__builtin_powf: |
| case Builtin::BI__builtin_powf16: |
| case Builtin::BI__builtin_powl: |
| return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::pow, |
| Intrinsic::experimental_constrained_pow)); |
| |
| case Builtin::BIrint: |
| case Builtin::BIrintf: |
| case Builtin::BIrintl: |
| case Builtin::BI__builtin_rint: |
| case Builtin::BI__builtin_rintf: |
| case Builtin::BI__builtin_rintf16: |
| case Builtin::BI__builtin_rintl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::rint, |
| Intrinsic::experimental_constrained_rint)); |
| |
| case Builtin::BIround: |
| case Builtin::BIroundf: |
| case Builtin::BIroundl: |
| case Builtin::BI__builtin_round: |
| case Builtin::BI__builtin_roundf: |
| case Builtin::BI__builtin_roundf16: |
| case Builtin::BI__builtin_roundl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::round, |
| Intrinsic::experimental_constrained_round)); |
| |
| case Builtin::BIsin: |
| case Builtin::BIsinf: |
| case Builtin::BIsinl: |
| case Builtin::BI__builtin_sin: |
| case Builtin::BI__builtin_sinf: |
| case Builtin::BI__builtin_sinf16: |
| case Builtin::BI__builtin_sinl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::sin, |
| Intrinsic::experimental_constrained_sin)); |
| |
| case Builtin::BIsqrt: |
| case Builtin::BIsqrtf: |
| case Builtin::BIsqrtl: |
| case Builtin::BI__builtin_sqrt: |
| case Builtin::BI__builtin_sqrtf: |
| case Builtin::BI__builtin_sqrtf16: |
| case Builtin::BI__builtin_sqrtl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::sqrt, |
| Intrinsic::experimental_constrained_sqrt)); |
| |
| case Builtin::BItrunc: |
| case Builtin::BItruncf: |
| case Builtin::BItruncl: |
| case Builtin::BI__builtin_trunc: |
| case Builtin::BI__builtin_truncf: |
| case Builtin::BI__builtin_truncf16: |
| case Builtin::BI__builtin_truncl: |
| return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
| Intrinsic::trunc, |
| Intrinsic::experimental_constrained_trunc)); |
| |
| case Builtin::BIlround: |
| case Builtin::BIlroundf: |
| case Builtin::BIlroundl: |
| case Builtin::BI__builtin_lround: |
| case Builtin::BI__builtin_lroundf: |
| case Builtin::BI__builtin_lroundl: |
| return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
| *this, E, Intrinsic::lround, |
| Intrinsic::experimental_constrained_lround)); |
| |
| case Builtin::BIllround: |
| case Builtin::BIllroundf: |
| case Builtin::BIllroundl: |
| case Builtin::BI__builtin_llround: |
| case Builtin::BI__builtin_llroundf: |
| case Builtin::BI__builtin_llroundl: |
| return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
| *this, E, Intrinsic::llround, |
| Intrinsic::experimental_constrained_llround)); |
| |
| case Builtin::BIlrint: |
| case Builtin::BIlrintf: |
| case Builtin::BIlrintl: |
| case Builtin::BI__builtin_lrint: |
| case Builtin::BI__builtin_lrintf: |
| case Builtin::BI__builtin_lrintl: |
| return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
| *this, E, Intrinsic::lrint, |
| Intrinsic::experimental_constrained_lrint)); |
| |
| case Builtin::BIllrint: |
| case Builtin::BIllrintf: |
| case Builtin::BIllrintl: |
| case Builtin::BI__builtin_llrint: |
| case Builtin::BI__builtin_llrintf: |
| case Builtin::BI__builtin_llrintl: |
| return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
| *this, E, Intrinsic::llrint, |
| Intrinsic::experimental_constrained_llrint)); |
| |
| default: |
| break; |
| } |
| } |
| |
| switch (BuiltinID) { |
| default: break; |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| case Builtin::BI__builtin___NSStringMakeConstantString: |
| return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| case Builtin::BI__va_start: |
| case Builtin::BI__builtin_va_end: |
| return RValue::get( |
| EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
| ? EmitScalarExpr(E->getArg(0)) |
| : EmitVAListRef(E->getArg(0)).getPointer(), |
| BuiltinID != Builtin::BI__builtin_va_end)); |
| case Builtin::BI__builtin_va_copy: { |
| Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
| Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
| |
| llvm::Type *Type = Int8PtrTy; |
| |
| DstPtr = Builder.CreateBitCast(DstPtr, Type); |
| SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
| return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
| {DstPtr, SrcPtr})); |
| } |
| case Builtin::BI__builtin_abs: |
| case Builtin::BI__builtin_labs: |
| case Builtin::BI__builtin_llabs: { |
| // X < 0 ? -X : X |
| // The negation has 'nsw' because abs of INT_MIN is undefined. |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); |
| Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); |
| Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); |
| Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_complex: { |
| Value *Real = EmitScalarExpr(E->getArg(0)); |
| Value *Imag = EmitScalarExpr(E->getArg(1)); |
| return RValue::getComplex({Real, Imag}); |
| } |
| case Builtin::BI__builtin_conj: |
| case Builtin::BI__builtin_conjf: |
| case Builtin::BI__builtin_conjl: |
| case Builtin::BIconj: |
| case Builtin::BIconjf: |
| case Builtin::BIconjl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| Value *Real = ComplexVal.first; |
| Value *Imag = ComplexVal.second; |
| Imag = Builder.CreateFNeg(Imag, "neg"); |
| return RValue::getComplex(std::make_pair(Real, Imag)); |
| } |
| case Builtin::BI__builtin_creal: |
| case Builtin::BI__builtin_crealf: |
| case Builtin::BI__builtin_creall: |
| case Builtin::BIcreal: |
| case Builtin::BIcrealf: |
| case Builtin::BIcreall: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.first); |
| } |
| |
| case Builtin::BI__builtin_dump_struct: { |
| llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy); |
| llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get( |
| LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true); |
| |
| Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); |
| CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); |
| |
| const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); |
| QualType Arg0Type = Arg0->getType()->getPointeeType(); |
| |
| Value *RecordPtr = EmitScalarExpr(Arg0); |
| Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, |
| {LLVMFuncType, Func}, 0); |
| return RValue::get(Res); |
| } |
| |
| case Builtin::BI__builtin_preserve_access_index: { |
| // Only enable the preserved-access-index region when debug info is |
| // available, since debug info is needed to preserve the user-level |
| // access pattern. |
| if (!getDebugInfo()) { |
| CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); |
| return RValue::get(EmitScalarExpr(E->getArg(0))); |
| } |
| |
| // Nested builtin_preserve_access_index() is not supported. |
| if (IsInPreservedAIRegion) { |
| CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); |
| return RValue::get(EmitScalarExpr(E->getArg(0))); |
| } |
| |
| IsInPreservedAIRegion = true; |
| Value *Res = EmitScalarExpr(E->getArg(0)); |
| IsInPreservedAIRegion = false; |
| return RValue::get(Res); |
| } |
| |
| case Builtin::BI__builtin_cimag: |
| case Builtin::BI__builtin_cimagf: |
| case Builtin::BI__builtin_cimagl: |
| case Builtin::BIcimag: |
| case Builtin::BIcimagf: |
| case Builtin::BIcimagl: { |
| ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
| return RValue::get(ComplexVal.second); |
| } |
| |
| case Builtin::BI__builtin_clrsb: |
| case Builtin::BI__builtin_clrsbl: |
| case Builtin::BI__builtin_clrsbll: { |
| // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 |
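| // For example (illustrative only), for a 32-bit int, clrsb(15) is |
| // ctlz(15) - 1 == 28 - 1 == 27 redundant sign bits. |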
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Zero = llvm::Constant::getNullValue(ArgType); |
| Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg"); |
| Value *Inverse = Builder.CreateNot(ArgValue, "not"); |
| Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); |
| Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); |
| Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_ctzs: |
| case Builtin::BI__builtin_ctz: |
| case Builtin::BI__builtin_ctzl: |
| case Builtin::BI__builtin_ctzll: { |
| Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_clzs: |
| case Builtin::BI__builtin_clz: |
| case Builtin::BI__builtin_clzl: |
| case Builtin::BI__builtin_clzll: { |
| Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_ffs: |
| case Builtin::BI__builtin_ffsl: |
| case Builtin::BI__builtin_ffsll: { |
| // ffs(x) -> x ? cttz(x) + 1 : 0 |
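| // For example (illustrative only), ffs(8) is cttz(8) + 1 == 4, and ffs(0) |
| // folds to 0 via the select on the zero check below. |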
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = |
| Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
| llvm::ConstantInt::get(ArgType, 1)); |
| Value *Zero = llvm::Constant::getNullValue(ArgType); |
| Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
| Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_parity: |
| case Builtin::BI__builtin_parityl: |
| case Builtin::BI__builtin_parityll: { |
| // parity(x) -> ctpop(x) & 1 |
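| // For example (illustrative only), parity(0b1011) is ctpop(11) & 1 == 1. |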
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Tmp = Builder.CreateCall(F, ArgValue); |
| Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__lzcnt16: |
| case Builtin::BI__lzcnt: |
| case Builtin::BI__lzcnt64: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__popcnt16: |
| case Builtin::BI__popcnt: |
| case Builtin::BI__popcnt64: |
| case Builtin::BI__builtin_popcount: |
| case Builtin::BI__builtin_popcountl: |
| case Builtin::BI__builtin_popcountll: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| |
| llvm::Type *ArgType = ArgValue->getType(); |
| Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
| |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| Value *Result = Builder.CreateCall(F, ArgValue); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
| "cast"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_unpredictable: { |
| // Always return the argument of __builtin_unpredictable. LLVM does not |
| // handle this builtin. Metadata for this builtin should be added directly |
| // to instructions such as branches or switches that use it. |
| return RValue::get(EmitScalarExpr(E->getArg(0))); |
| } |
| case Builtin::BI__builtin_expect: { |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = ArgValue->getType(); |
| |
| Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
| // Don't generate llvm.expect at -O0, as the backend won't use it for |
| // anything. |
| // Note that we still IRGen ExpectedValue because it could have side effects. |
| if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
| return RValue::get(ArgValue); |
| |
| Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
| Value *Result = |
| Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_expect_with_probability: { |
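| // For example (illustrative only), __builtin_expect_with_probability(c, 1, |
| // 0.9) hints that 'c' is expected to equal 1 with probability 0.9. |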
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| llvm::Type *ArgType = ArgValue->getType(); |
| |
| Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
| llvm::APFloat Probability(0.0); |
| const Expr *ProbArg = E->getArg(2); |
| bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext()); |
| assert(EvalSucceed && "probability should be able to evaluate as float"); |
| (void)EvalSucceed; |
| bool LoseInfo = false; |
| Probability.convert(llvm::APFloat::IEEEdouble(), |
| llvm::RoundingMode::Dynamic, &LoseInfo); |
| llvm::Type *Ty = ConvertType(ProbArg->getType()); |
| Constant *Confidence = ConstantFP::get(Ty, Probability); |
| // Don't generate llvm.expect.with.probability at -O0, as the backend |
| // won't use it for anything. |
| // Note that we still IRGen ExpectedValue because it could have side effects. |
| if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
| return RValue::get(ArgValue); |
| |
| Function *FnExpect = |
| CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType); |
| Value *Result = Builder.CreateCall( |
| FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval"); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_assume_aligned: { |
| const Expr *Ptr = E->getArg(0); |
| Value *PtrValue = EmitScalarExpr(Ptr); |
| Value *OffsetValue = |
| (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
| |
| Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
| ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
| if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
| AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
| llvm::Value::MaximumAlignment); |
| |
| emitAlignmentAssumption(PtrValue, Ptr, |
| /*The expr loc is sufficient.*/ SourceLocation(), |
| AlignmentCI, OffsetValue); |
| return RValue::get(PtrValue); |
| } |
| case Builtin::BI__assume: |
| case Builtin::BI__builtin_assume: { |
| if (E->getArg(0)->HasSideEffects(getContext())) |
| return RValue::get(nullptr); |
| |
| Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
| Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
| return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
| } |
| case Builtin::BI__builtin_bswap16: |
| case Builtin::BI__builtin_bswap32: |
| case Builtin::BI__builtin_bswap64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
| } |
| case Builtin::BI__builtin_bitreverse8: |
| case Builtin::BI__builtin_bitreverse16: |
| case Builtin::BI__builtin_bitreverse32: |
| case Builtin::BI__builtin_bitreverse64: { |
| return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
| } |
| case Builtin::BI__builtin_rotateleft8: |
| case Builtin::BI__builtin_rotateleft16: |
| case Builtin::BI__builtin_rotateleft32: |
| case Builtin::BI__builtin_rotateleft64: |
| case Builtin::BI_rotl8: // Microsoft variants of rotate left |
| case Builtin::BI_rotl16: |
| case Builtin::BI_rotl: |
| case Builtin::BI_lrotl: |
| case Builtin::BI_rotl64: |
| return emitRotate(E, false); |
| |
| case Builtin::BI__builtin_rotateright8: |
| case Builtin::BI__builtin_rotateright16: |
| case Builtin::BI__builtin_rotateright32: |
| case Builtin::BI__builtin_rotateright64: |
| case Builtin::BI_rotr8: // Microsoft variants of rotate right |
| case Builtin::BI_rotr16: |
| case Builtin::BI_rotr: |
| case Builtin::BI_lrotr: |
| case Builtin::BI_rotr64: |
| return emitRotate(E, true); |
| |
| case Builtin::BI__builtin_constant_p: { |
| llvm::Type *ResultType = ConvertType(E->getType()); |
| |
| const Expr *Arg = E->getArg(0); |
| QualType ArgType = Arg->getType(); |
| // FIXME: The allowance for Obj-C pointers and block pointers is historical |
| // and likely a mistake. |
| if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
| !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
| // Per the GCC documentation, only numeric constants are recognized after |
| // inlining. |
| return RValue::get(ConstantInt::get(ResultType, 0)); |
| |
| if (Arg->HasSideEffects(getContext())) |
| // The argument is unevaluated, so be conservative if it might have |
| // side-effects. |
| return RValue::get(ConstantInt::get(ResultType, 0)); |
| |
| Value *ArgValue = EmitScalarExpr(Arg); |
| if (ArgType->isObjCObjectPointerType()) { |
| // Convert Objective-C objects to id, because we cannot distinguish between |
| // the LLVM types for different Obj-C classes; they are all opaque. |
| ArgType = CGM.getContext().getObjCIdType(); |
| ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType)); |
| } |
| Function *F = |
| CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); |
| Value *Result = Builder.CreateCall(F, ArgValue); |
| if (Result->getType() != ResultType) |
| Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false); |
| return RValue::get(Result); |
| } |
| case Builtin::BI__builtin_dynamic_object_size: |
| case Builtin::BI__builtin_object_size: { |
| unsigned Type = |
| E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
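| // The 'type' argument follows the GCC encoding (0-3): bit 0 selects the |
| // closest surrounding subobject instead of the whole object, and bit 1 |
| // selects the minimum rather than the maximum remaining size. |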
| auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
| |
| // We pass this builtin onto the optimizer so that it can figure out the |
| // object size in more complex cases. |
| bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
| return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
| /*EmittedE=*/nullptr, IsDynamic)); |
| } |
| case Builtin::BI__builtin_prefetch: { |
| Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
| // FIXME: Technically these constants should be of type 'int', yes? |
| RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
| llvm::ConstantInt::get(Int32Ty, 0); |
| Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
| llvm::ConstantInt::get(Int32Ty, 3); |
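| // The final intrinsic operand distinguishes a data prefetch (1) from an |
| // instruction prefetch (0). |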
| Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
| Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
| return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
| } |
| case Builtin::BI__builtin_readcyclecounter: { |
| Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
| return RValue::get(Builder.CreateCall(F)); |
| } |
| case Builtin::BI__builtin___clear_cache: { |
| Value *Begin = EmitScalarExpr(E->getArg(0)); |
| Value *End = EmitScalarExpr(E->getArg(1)); |
| Function *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
| return RValue::get(Builder.CreateCall(F, {Begin, End})); |
| } |
| case Builtin::BI__builtin_trap: |
| return RValue::get(EmitTrapCall(Intrinsic::trap)); |
| case Builtin::BI__debugbreak: |
| return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
| case Builtin::BI__builtin_unreachable: { |
| EmitUnreachable(E->getExprLoc()); |
| |
| // We do need to preserve an insertion point. |
| EmitBlock(createBasicBlock("unreachable.cont")); |
| |
| return RValue::get(nullptr); |
| } |
| |
| case Builtin::BI__builtin_powi: |
| case Builtin::BI__builtin_powif: |
| case Builtin::BI__builtin_powil: |
| return RValue::get(emitBinaryMaybeConstrainedFPBuiltin( |
| *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi)); |
| |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: { |
| // Ordered comparisons: we know the arguments to these are matching scalar |
| // floating point values. |
| Value *LHS = EmitScalarExpr(E->getArg(0)); |
| Value *RHS = EmitScalarExpr(E->getArg(1)); |
| |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown ordered comparison"); |
| case Builtin::BI__builtin_isgreater: |
| LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isgreaterequal: |
| LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isless: |
| LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessequal: |
| LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_islessgreater: |
| LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
| break; |
| case Builtin::BI__builtin_isunordered: |
| LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
| break; |
| } |
| // ZExt bool to int type. |
| return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
| } |
| case Builtin::BI__builtin_isnan: { |
| Value *V = EmitScalarExpr(E->getArg(0)); |
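| // An unordered self-comparison is true exactly when the operand is a NaN. |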
| V = Builder.CreateFCmpUNO(V, V, "cmp"); |
| return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
| } |
| |
| case Builtin::BI__builtin_matrix_transpose: { |
| const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
| Value *MatValue = EmitScalarExpr(E->getArg(0)); |
| MatrixBuilder<CGBuilderTy> MB(Builder); |
| Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(), |
| MatrixTy->getNumColumns()); |
| return RValue::get(Result); |
| } |
| |
| case Builtin::BI__builtin_matrix_column_major_load: { |
| MatrixBuilder<CGBuilderTy> MB(Builder); |
| // Emit everything that isn't dependent on the first parameter type |
| Value *Stride = EmitScalarExpr(E->getArg(3)); |
|