| //===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file implements extra semantic analysis beyond what is enforced |
| // by the C type system. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "clang/Sema/SemaInternal.h" |
| #include "clang/AST/ASTContext.h" |
| #include "clang/AST/CharUnits.h" |
| #include "clang/AST/DeclCXX.h" |
| #include "clang/AST/DeclObjC.h" |
| #include "clang/AST/EvaluatedExprVisitor.h" |
| #include "clang/AST/Expr.h" |
| #include "clang/AST/ExprCXX.h" |
| #include "clang/AST/ExprObjC.h" |
| #include "clang/AST/ExprOpenMP.h" |
| #include "clang/AST/StmtCXX.h" |
| #include "clang/AST/StmtObjC.h" |
| #include "clang/Analysis/Analyses/FormatString.h" |
| #include "clang/Basic/CharInfo.h" |
| #include "clang/Basic/TargetBuiltins.h" |
| #include "clang/Basic/TargetInfo.h" |
| #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. |
| #include "clang/Sema/Initialization.h" |
| #include "clang/Sema/Lookup.h" |
| #include "clang/Sema/ScopeInfo.h" |
| #include "clang/Sema/Sema.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SmallBitVector.h" |
| #include "llvm/ADT/SmallString.h" |
| #include "llvm/Support/ConvertUTF.h" |
| #include "llvm/Support/raw_ostream.h" |
| #include <limits> |
| using namespace clang; |
| using namespace sema; |
| |
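| /// Map a byte offset within a string literal back to the source location of |
| /// that byte, taking escape sequences and token concatenation into account. |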
| SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, |
| unsigned ByteNo) const { |
| return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, |
| Context.getTargetInfo()); |
| } |
| |
| /// Checks that a call expression's argument count is the desired number. |
| /// This is useful when doing custom type-checking. Returns true on error. |
| static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { |
| unsigned argCount = call->getNumArgs(); |
| if (argCount == desiredArgCount) return false; |
| |
| if (argCount < desiredArgCount) |
| return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 /*function call*/ << desiredArgCount << argCount |
| << call->getSourceRange(); |
| |
| // Highlight all the excess arguments. |
| SourceRange range(call->getArg(desiredArgCount)->getLocStart(), |
| call->getArg(argCount - 1)->getLocEnd()); |
| |
| return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) |
| << 0 /*function call*/ << desiredArgCount << argCount << range; |
| } |
| |
| /// Check that the first argument to __builtin_annotation is an integer |
| /// and the second argument is a non-wide string literal. |
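| /// For example: int v = __builtin_annotation(x, "my annotation"); |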
| static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 2)) |
| return true; |
| |
| // First argument should be an integer. |
| Expr *ValArg = TheCall->getArg(0); |
| QualType Ty = ValArg->getType(); |
| if (!Ty->isIntegerType()) { |
| S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg) |
| << ValArg->getSourceRange(); |
| return true; |
| } |
| |
| // Second argument should be a constant string. |
| Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); |
| StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); |
| if (!Literal || !Literal->isAscii()) { |
| S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg) |
| << StrArg->getSourceRange(); |
| return true; |
| } |
| |
| TheCall->setType(Ty); |
| return false; |
| } |
| |
| /// Check that the argument to __builtin_addressof is a glvalue, and set the |
| /// result type to the corresponding pointer type. |
| static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 1)) |
| return true; |
| |
| ExprResult Arg(TheCall->getArg(0)); |
| QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart()); |
| if (ResultType.isNull()) |
| return true; |
| |
| TheCall->setArg(0, Arg.get()); |
| TheCall->setType(ResultType); |
| return false; |
| } |
| |
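| /// Check that the first two arguments to __builtin_add_overflow, |
| /// __builtin_sub_overflow or __builtin_mul_overflow are integers and that |
| /// the third is a pointer to a non-const integer, as in: |
| ///   int res; bool ov = __builtin_add_overflow(a, b, &res); |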
| static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) { |
| if (checkArgCount(S, TheCall, 3)) |
| return true; |
| |
| // First two arguments should be integers. |
| for (unsigned I = 0; I < 2; ++I) { |
| Expr *Arg = TheCall->getArg(I); |
| QualType Ty = Arg->getType(); |
| if (!Ty->isIntegerType()) { |
| S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int) |
| << Ty << Arg->getSourceRange(); |
| return true; |
| } |
| } |
| |
| // Third argument should be a pointer to a non-const integer. |
| // IRGen correctly handles volatile, restrict, and address spaces, and |
| // the other qualifiers aren't possible. |
| { |
| Expr *Arg = TheCall->getArg(2); |
| QualType Ty = Arg->getType(); |
| const auto *PtrTy = Ty->getAs<PointerType>(); |
| if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() && |
| !PtrTy->getPointeeType().isConstQualified())) { |
| S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int) |
| << Ty << Arg->getSourceRange(); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
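| /// Diagnose the checked "_chk" memory functions (e.g. __builtin___memcpy_chk) |
| /// when both the copy size and the destination size are compile-time |
| /// constants and the copy size exceeds the destination size. |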
| static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl, |
| CallExpr *TheCall, unsigned SizeIdx, |
| unsigned DstSizeIdx) { |
| if (TheCall->getNumArgs() <= SizeIdx || |
| TheCall->getNumArgs() <= DstSizeIdx) |
| return; |
| |
| const Expr *SizeArg = TheCall->getArg(SizeIdx); |
| const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx); |
| |
| llvm::APSInt Size, DstSize; |
| |
| // Find out if both sizes are known at compile time. |
| if (!SizeArg->EvaluateAsInt(Size, S.Context) || |
| !DstSizeArg->EvaluateAsInt(DstSize, S.Context)) |
| return; |
| |
| if (Size.ule(DstSize)) |
| return; |
| |
| // Confirmed overflow, so generate the diagnostic. |
| IdentifierInfo *FnName = FDecl->getIdentifier(); |
| SourceLocation SL = TheCall->getLocStart(); |
| SourceRange SR = TheCall->getSourceRange(); |
| |
| S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName; |
| } |
| |
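| /// Check __builtin_call_with_static_chain(Call, Chain): the first argument |
| /// must be a plain function call (not a block, builtin or pseudo-destructor |
| /// call) and the second must be a pointer; the builtin then takes on the |
| /// type of the wrapped call. |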
| static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { |
| if (checkArgCount(S, BuiltinCall, 2)) |
| return true; |
| |
| SourceLocation BuiltinLoc = BuiltinCall->getLocStart(); |
| Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); |
| Expr *Call = BuiltinCall->getArg(0); |
| Expr *Chain = BuiltinCall->getArg(1); |
| |
| if (Call->getStmtClass() != Stmt::CallExprClass) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| auto *CE = cast<CallExpr>(Call); |
| if (CE->getCallee()->getType()->isBlockPointerType()) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| const Decl *TargetDecl = CE->getCalleeDecl(); |
| if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) |
| if (FD->getBuiltinID()) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { |
| S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) |
| << Call->getSourceRange(); |
| return true; |
| } |
| |
| ExprResult ChainResult = S.UsualUnaryConversions(Chain); |
| if (ChainResult.isInvalid()) |
| return true; |
| if (!ChainResult.get()->getType()->isPointerType()) { |
| S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) |
| << Chain->getSourceRange(); |
| return true; |
| } |
| |
| QualType ReturnTy = CE->getCallReturnType(S.Context); |
| QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; |
| QualType BuiltinTy = S.Context.getFunctionType( |
| ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); |
| QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); |
| |
| Builtin = |
| S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); |
| |
| BuiltinCall->setType(CE->getType()); |
| BuiltinCall->setValueKind(CE->getValueKind()); |
| BuiltinCall->setObjectKind(CE->getObjectKind()); |
| BuiltinCall->setCallee(Builtin); |
| BuiltinCall->setArg(1, ChainResult.get()); |
| |
| return false; |
| } |
| |
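| /// Check that a builtin such as __exception_code is used only inside the |
| /// kind of SEH scope (an __except block or filter) that it requires. |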
| static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, |
| Scope::ScopeFlags NeededScopeFlags, |
| unsigned DiagID) { |
| // Scopes aren't available during instantiation. Fortunately, builtin |
| // functions cannot be template args so they cannot be formed through template |
| // instantiation. Therefore checking once during the parse is sufficient. |
| if (!SemaRef.ActiveTemplateInstantiations.empty()) |
| return false; |
| |
| Scope *S = SemaRef.getCurScope(); |
| while (S && !S->isSEHExceptScope()) |
| S = S->getParent(); |
| if (!S || !(S->getFlags() & NeededScopeFlags)) { |
| auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| SemaRef.Diag(TheCall->getExprLoc(), DiagID) |
| << DRE->getDecl()->getIdentifier(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| ExprResult |
| Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, |
| CallExpr *TheCall) { |
| ExprResult TheCallResult(TheCall); |
| |
| // Find out if any arguments are required to be integer constant expressions. |
| unsigned ICEArguments = 0; |
| ASTContext::GetBuiltinTypeError Error; |
| Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); |
| if (Error != ASTContext::GE_None) |
| ICEArguments = 0; // Don't diagnose previously diagnosed errors. |
| |
| // If any arguments are required to be ICE's, check and diagnose. |
| for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { |
| // Skip arguments not required to be ICE's. |
| if ((ICEArguments & (1 << ArgNo)) == 0) continue; |
| |
| llvm::APSInt Result; |
| if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) |
| return true; |
| ICEArguments &= ~(1 << ArgNo); |
| } |
| |
| switch (BuiltinID) { |
| case Builtin::BI__builtin___CFStringMakeConstantString: |
| assert(TheCall->getNumArgs() == 1 && |
| "Wrong # arguments to builtin CFStringMakeConstantString"); |
| if (CheckObjCString(TheCall->getArg(0))) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_stdarg_start: |
| case Builtin::BI__builtin_va_start: |
| if (SemaBuiltinVAStart(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__va_start: { |
| switch (Context.getTargetInfo().getTriple().getArch()) { |
| case llvm::Triple::arm: |
| case llvm::Triple::thumb: |
| if (SemaBuiltinVAStartARM(TheCall)) |
| return ExprError(); |
| break; |
| default: |
| if (SemaBuiltinVAStart(TheCall)) |
| return ExprError(); |
| break; |
| } |
| break; |
| } |
| case Builtin::BI__builtin_isgreater: |
| case Builtin::BI__builtin_isgreaterequal: |
| case Builtin::BI__builtin_isless: |
| case Builtin::BI__builtin_islessequal: |
| case Builtin::BI__builtin_islessgreater: |
| case Builtin::BI__builtin_isunordered: |
| if (SemaBuiltinUnorderedCompare(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_fpclassify: |
| if (SemaBuiltinFPClassification(TheCall, 6)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_isfinite: |
| case Builtin::BI__builtin_isinf: |
| case Builtin::BI__builtin_isinf_sign: |
| case Builtin::BI__builtin_isnan: |
| case Builtin::BI__builtin_isnormal: |
| if (SemaBuiltinFPClassification(TheCall, 1)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_shufflevector: |
| return SemaBuiltinShuffleVector(TheCall); |
| // SemaBuiltinShuffleVector guts TheCall but does not release it, so it is |
| // safe for the smart pointer to free it here. |
| case Builtin::BI__builtin_prefetch: |
| if (SemaBuiltinPrefetch(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__assume: |
| case Builtin::BI__builtin_assume: |
| if (SemaBuiltinAssume(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_assume_aligned: |
| if (SemaBuiltinAssumeAligned(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_object_size: |
| if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_longjmp: |
| if (SemaBuiltinLongjmp(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_setjmp: |
| if (SemaBuiltinSetjmp(TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI_setjmp: |
| case Builtin::BI_setjmpex: |
| if (checkArgCount(*this, TheCall, 1)) |
| return true; |
| break; |
| |
| case Builtin::BI__builtin_classify_type: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__builtin_constant_p: |
| if (checkArgCount(*this, TheCall, 1)) return true; |
| TheCall->setType(Context.IntTy); |
| break; |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| case Builtin::BI__sync_fetch_and_nand: |
| case Builtin::BI__sync_fetch_and_nand_1: |
| case Builtin::BI__sync_fetch_and_nand_2: |
| case Builtin::BI__sync_fetch_and_nand_4: |
| case Builtin::BI__sync_fetch_and_nand_8: |
| case Builtin::BI__sync_fetch_and_nand_16: |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| case Builtin::BI__sync_nand_and_fetch: |
| case Builtin::BI__sync_nand_and_fetch_1: |
| case Builtin::BI__sync_nand_and_fetch_2: |
| case Builtin::BI__sync_nand_and_fetch_4: |
| case Builtin::BI__sync_nand_and_fetch_8: |
| case Builtin::BI__sync_nand_and_fetch_16: |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: |
| case Builtin::BI__sync_swap: |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| return SemaBuiltinAtomicOverloaded(TheCallResult); |
| case Builtin::BI__builtin_nontemporal_load: |
| case Builtin::BI__builtin_nontemporal_store: |
| return SemaBuiltinNontemporalOverloaded(TheCallResult); |
| #define BUILTIN(ID, TYPE, ATTRS) |
| #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ |
| case Builtin::BI##ID: \ |
| return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); |
| #include "clang/Basic/Builtins.def" |
| case Builtin::BI__builtin_annotation: |
| if (SemaBuiltinAnnotation(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_addressof: |
| if (SemaBuiltinAddressof(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_add_overflow: |
| case Builtin::BI__builtin_sub_overflow: |
| case Builtin::BI__builtin_mul_overflow: |
| if (SemaBuiltinOverflow(*this, TheCall)) |
| return ExprError(); |
| break; |
| case Builtin::BI__builtin_operator_new: |
| case Builtin::BI__builtin_operator_delete: |
| if (!getLangOpts().CPlusPlus) { |
| Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language) |
| << (BuiltinID == Builtin::BI__builtin_operator_new |
| ? "__builtin_operator_new" |
| : "__builtin_operator_delete") |
| << "C++"; |
| return ExprError(); |
| } |
| // CodeGen assumes it can find the global new and delete to call, |
| // so ensure that they are declared. |
| DeclareGlobalNewDelete(); |
| break; |
| |
| // Check the secure string manipulation functions where overflows are |
| // detectable at compile time. |
| case Builtin::BI__builtin___memcpy_chk: |
| case Builtin::BI__builtin___memmove_chk: |
| case Builtin::BI__builtin___memset_chk: |
| case Builtin::BI__builtin___strlcat_chk: |
| case Builtin::BI__builtin___strlcpy_chk: |
| case Builtin::BI__builtin___strncat_chk: |
| case Builtin::BI__builtin___strncpy_chk: |
| case Builtin::BI__builtin___stpncpy_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3); |
| break; |
| case Builtin::BI__builtin___memccpy_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4); |
| break; |
| case Builtin::BI__builtin___snprintf_chk: |
| case Builtin::BI__builtin___vsnprintf_chk: |
| SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3); |
| break; |
| |
| case Builtin::BI__builtin_call_with_static_chain: |
| if (SemaBuiltinCallWithStaticChain(*this, TheCall)) |
| return ExprError(); |
| break; |
| |
| case Builtin::BI__exception_code: |
| case Builtin::BI_exception_code: { |
| if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, |
| diag::err_seh___except_block)) |
| return ExprError(); |
| break; |
| } |
| case Builtin::BI__exception_info: |
| case Builtin::BI_exception_info: { |
| if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, |
| diag::err_seh___except_filter)) |
| return ExprError(); |
| break; |
| } |
| |
| case Builtin::BI__GetExceptionInfo: |
| if (checkArgCount(*this, TheCall, 1)) |
| return ExprError(); |
| |
| if (CheckCXXThrowOperand( |
| TheCall->getLocStart(), |
| Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), |
| TheCall)) |
| return ExprError(); |
| |
| TheCall->setType(Context.VoidPtrTy); |
| break; |
| } |
| |
| // Since the target specific builtins for each arch overlap, only check those |
| // of the arch we are compiling for. |
| if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { |
| switch (Context.getTargetInfo().getTriple().getArch()) { |
| case llvm::Triple::arm: |
| case llvm::Triple::armeb: |
| case llvm::Triple::thumb: |
| case llvm::Triple::thumbeb: |
| if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::aarch64: |
| case llvm::Triple::aarch64_be: |
| if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::mips: |
| case llvm::Triple::mipsel: |
| case llvm::Triple::mips64: |
| case llvm::Triple::mips64el: |
| if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::systemz: |
| if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::x86: |
| case llvm::Triple::x86_64: |
| if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| case llvm::Triple::ppc: |
| case llvm::Triple::ppc64: |
| case llvm::Triple::ppc64le: |
| if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall)) |
| return ExprError(); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| return TheCallResult; |
| } |
| |
| // Get the valid immediate range for the specified NEON type code. |
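| // For a shift the upper bound is the element width in bits minus one; |
| // otherwise it is the index of the last vector lane (doubled for quad |
| // registers). |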
| static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { |
| NeonTypeFlags Type(t); |
| int IsQuad = ForceQuad ? true : Type.isQuad(); |
| switch (Type.getEltType()) { |
| case NeonTypeFlags::Int8: |
| case NeonTypeFlags::Poly8: |
| return shift ? 7 : (8 << IsQuad) - 1; |
| case NeonTypeFlags::Int16: |
| case NeonTypeFlags::Poly16: |
| return shift ? 15 : (4 << IsQuad) - 1; |
| case NeonTypeFlags::Int32: |
| return shift ? 31 : (2 << IsQuad) - 1; |
| case NeonTypeFlags::Int64: |
| case NeonTypeFlags::Poly64: |
| return shift ? 63 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Poly128: |
| return shift ? 127 : (1 << IsQuad) - 1; |
| case NeonTypeFlags::Float16: |
| assert(!shift && "cannot shift float types!"); |
| return (4 << IsQuad) - 1; |
| case NeonTypeFlags::Float32: |
| assert(!shift && "cannot shift float types!"); |
| return (2 << IsQuad) - 1; |
| case NeonTypeFlags::Float64: |
| assert(!shift && "cannot shift float types!"); |
| return (1 << IsQuad) - 1; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
| |
| /// getNeonEltType - Return the QualType corresponding to the elements of |
| /// the vector type specified by the NeonTypeFlags. This is used to check |
| /// the pointer arguments for Neon load/store intrinsics. |
| static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, |
| bool IsPolyUnsigned, bool IsInt64Long) { |
| switch (Flags.getEltType()) { |
| case NeonTypeFlags::Int8: |
| return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Int16: |
| return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Int32: |
| return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; |
| case NeonTypeFlags::Int64: |
| if (IsInt64Long) |
| return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; |
| else |
| return Flags.isUnsigned() ? Context.UnsignedLongLongTy |
| : Context.LongLongTy; |
| case NeonTypeFlags::Poly8: |
| return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; |
| case NeonTypeFlags::Poly16: |
| return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; |
| case NeonTypeFlags::Poly64: |
| if (IsInt64Long) |
| return Context.UnsignedLongTy; |
| else |
| return Context.UnsignedLongLongTy; |
| case NeonTypeFlags::Poly128: |
| break; |
| case NeonTypeFlags::Float16: |
| return Context.HalfTy; |
| case NeonTypeFlags::Float32: |
| return Context.FloatTy; |
| case NeonTypeFlags::Float64: |
| return Context.DoubleTy; |
| } |
| llvm_unreachable("Invalid NeonTypeFlag!"); |
| } |
| |
| bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| llvm::APSInt Result; |
| uint64_t mask = 0; |
| unsigned TV = 0; |
| int PtrArgNum = -1; |
| bool HasConstPtr = false; |
| switch (BuiltinID) { |
| #define GET_NEON_OVERLOAD_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_OVERLOAD_CHECK |
| } |
| |
| // For NEON intrinsics which are overloaded on vector element type, validate |
| // the immediate which specifies which variant to emit. |
| unsigned ImmArg = TheCall->getNumArgs() - 1; |
| if (mask) { |
| if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) |
| return true; |
| |
| TV = Result.getLimitedValue(64); |
| if ((TV > 63) || (mask & (1ULL << TV)) == 0) |
| return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code) |
| << TheCall->getArg(ImmArg)->getSourceRange(); |
| } |
| |
| if (PtrArgNum >= 0) { |
| // Check that pointer arguments have the specified type. |
| Expr *Arg = TheCall->getArg(PtrArgNum); |
| if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) |
| Arg = ICE->getSubExpr(); |
| ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); |
| QualType RHSTy = RHS.get()->getType(); |
| |
| llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); |
| bool IsPolyUnsigned = Arch == llvm::Triple::aarch64; |
| bool IsInt64Long = |
| Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; |
| QualType EltTy = |
| getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); |
| if (HasConstPtr) |
| EltTy = EltTy.withConst(); |
| QualType LHSTy = Context.getPointerType(EltTy); |
| AssignConvertType ConvTy; |
| ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); |
| if (RHS.isInvalid()) |
| return true; |
| if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy, |
| RHS.get(), AA_Assigning)) |
| return true; |
| } |
| |
| // For NEON intrinsics which take an immediate value as part of the |
| // instruction, range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: |
| return false; |
| #define GET_NEON_IMMEDIATE_CHECK |
| #include "clang/Basic/arm_neon.inc" |
| #undef GET_NEON_IMMEDIATE_CHECK |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
| bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, |
| unsigned MaxWidth) { |
| assert((BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == ARM::BI__builtin_arm_strex || |
| BuiltinID == ARM::BI__builtin_arm_stlex || |
| BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_strex || |
| BuiltinID == AArch64::BI__builtin_arm_stlex) && |
| "unexpected ARM builtin"); |
| bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex; |
| |
| DeclRefExpr *DRE = |
| cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| |
| // Ensure that we have the proper number of arguments. |
| if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) |
| return true; |
| |
| // Inspect the pointer argument of the atomic builtin. This should always be |
| // a pointer type, whose element is an integral scalar or pointer type. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); |
| ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.get(); |
| |
| const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next |
| // task is to insert the appropriate casts into the AST. First work out just |
| // what the appropriate type is. |
| QualType ValType = pointerType->getPointeeType(); |
| QualType AddrType = ValType.getUnqualifiedType().withVolatile(); |
| if (IsLdrex) |
| AddrType.addConst(); |
| |
| // Issue a warning if the cast is dodgy. |
| CastKind CastNeeded = CK_NoOp; |
| if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { |
| CastNeeded = CK_BitCast; |
| Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers) |
| << PointerArg->getType() |
| << Context.getPointerType(AddrType) |
| << AA_Passing << PointerArg->getSourceRange(); |
| } |
| |
| // Finally, do the cast and replace the argument with the corrected version. |
| AddrType = Context.getPointerType(AddrType); |
| PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); |
| if (PointerArgRes.isInvalid()) |
| return true; |
| PointerArg = PointerArgRes.get(); |
| |
| TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); |
| |
| // In general, we allow ints, floats and pointers to be loaded and stored. |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType() && !ValType->isFloatingType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| // But ARM doesn't have instructions to deal with 128-bit versions. |
| if (Context.getTypeSize(ValType) > MaxWidth) { |
| assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); |
| Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << PointerArg->getSourceRange(); |
| return true; |
| } |
| |
| if (IsLdrex) { |
| TheCall->setType(ValType); |
| return false; |
| } |
| |
| // Initialize the argument to be stored. |
| ExprResult ValArg = TheCall->getArg(0); |
| InitializedEntity Entity = InitializedEntity::InitializeParameter( |
| Context, ValType, /*consume*/ false); |
| ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); |
| if (ValArg.isInvalid()) |
| return true; |
| TheCall->setArg(0, ValArg.get()); |
| |
| // __builtin_arm_strex always returns an int. It's marked as such in the .def, |
| // but the custom checker bypasses all default analysis. |
| TheCall->setType(Context.IntTy); |
| return false; |
| } |
| |
| bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| llvm::APSInt Result; |
| |
| if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
| BuiltinID == ARM::BI__builtin_arm_ldaex || |
| BuiltinID == ARM::BI__builtin_arm_strex || |
| BuiltinID == ARM::BI__builtin_arm_stlex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); |
| } |
| |
| if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); |
| } |
| |
| if (BuiltinID == ARM::BI__builtin_arm_rsr64 || |
| BuiltinID == ARM::BI__builtin_arm_wsr64) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); |
| |
| if (BuiltinID == ARM::BI__builtin_arm_rsr || |
| BuiltinID == ARM::BI__builtin_arm_rsrp || |
| BuiltinID == ARM::BI__builtin_arm_wsr || |
| BuiltinID == ARM::BI__builtin_arm_wsrp) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| // For intrinsics which take an immediate value as part of the instruction, |
| // range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case ARM::BI__builtin_arm_ssat: i = 1; l = 1; u = 31; break; |
| case ARM::BI__builtin_arm_usat: i = 1; u = 31; break; |
| case ARM::BI__builtin_arm_vcvtr_f: |
| case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break; |
| case ARM::BI__builtin_arm_dmb: |
| case ARM::BI__builtin_arm_dsb: |
| case ARM::BI__builtin_arm_isb: |
| case ARM::BI__builtin_arm_dbg: l = 0; u = 15; break; |
| } |
| |
| // FIXME: VFP Intrinsics should error if VFP not present. |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
| bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| llvm::APSInt Result; |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
| BuiltinID == AArch64::BI__builtin_arm_ldaex || |
| BuiltinID == AArch64::BI__builtin_arm_strex || |
| BuiltinID == AArch64::BI__builtin_arm_stlex) { |
| return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); |
| } |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || |
| SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); |
| } |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
| BuiltinID == AArch64::BI__builtin_arm_wsr64) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, false); |
| |
| if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
| BuiltinID == AArch64::BI__builtin_arm_rsrp || |
| BuiltinID == AArch64::BI__builtin_arm_wsr || |
| BuiltinID == AArch64::BI__builtin_arm_wsrp) |
| return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); |
| |
| if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) |
| return true; |
| |
| // For intrinsics which take an immediate value as part of the instruction, |
| // range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case AArch64::BI__builtin_arm_dmb: |
| case AArch64::BI__builtin_arm_dsb: |
| case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); |
| } |
| |
| bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; |
| case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; |
| case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; |
| case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; |
| } |
| |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0; |
| bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || |
| BuiltinID == PPC::BI__builtin_divdeu || |
| BuiltinID == PPC::BI__builtin_bpermd; |
| bool IsTarget64Bit = |
| Context.getTargetInfo().getTypeWidth( |
| Context.getTargetInfo().getIntPtrType()) == 64; |
| bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || |
| BuiltinID == PPC::BI__builtin_divweu || |
| BuiltinID == PPC::BI__builtin_divde || |
| BuiltinID == PPC::BI__builtin_divdeu; |
| |
| if (Is64BitBltin && !IsTarget64Bit) |
| return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt) |
| << TheCall->getSourceRange(); |
| |
| if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || |
| (BuiltinID == PPC::BI__builtin_bpermd && |
| !Context.getTargetInfo().hasFeature("bpermd"))) |
| return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7) |
| << TheCall->getSourceRange(); |
| |
| switch (BuiltinID) { |
| default: return false; |
| case PPC::BI__builtin_altivec_crypto_vshasigmaw: |
| case PPC::BI__builtin_altivec_crypto_vshasigmad: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); |
| case PPC::BI__builtin_tbegin: |
| case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; |
| case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; |
| case PPC::BI__builtin_tabortwc: |
| case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; |
| case PPC::BI__builtin_tabortwci: |
| case PPC::BI__builtin_tabortdci: |
| return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, |
| CallExpr *TheCall) { |
| if (BuiltinID == SystemZ::BI__builtin_tabort) { |
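| // A constant abort code in the range [0, 255] is rejected as invalid. |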
| Expr *Arg = TheCall->getArg(0); |
| llvm::APSInt AbortCode(32); |
| if (Arg->isIntegerConstantExpr(AbortCode, Context) && |
| AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) |
| return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code) |
| << Arg->getSourceRange(); |
| } |
| |
| // For intrinsics which take an immediate value as part of the instruction, |
| // range check them here. |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_verimb: |
| case SystemZ::BI__builtin_s390_verimh: |
| case SystemZ::BI__builtin_s390_verimf: |
| case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; |
| case SystemZ::BI__builtin_s390_vfaeb: |
| case SystemZ::BI__builtin_s390_vfaeh: |
| case SystemZ::BI__builtin_s390_vfaef: |
| case SystemZ::BI__builtin_s390_vfaebs: |
| case SystemZ::BI__builtin_s390_vfaehs: |
| case SystemZ::BI__builtin_s390_vfaefs: |
| case SystemZ::BI__builtin_s390_vfaezb: |
| case SystemZ::BI__builtin_s390_vfaezh: |
| case SystemZ::BI__builtin_s390_vfaezf: |
| case SystemZ::BI__builtin_s390_vfaezbs: |
| case SystemZ::BI__builtin_s390_vfaezhs: |
| case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vfidb: |
| return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || |
| SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); |
| case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; |
| case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; |
| case SystemZ::BI__builtin_s390_vstrcb: |
| case SystemZ::BI__builtin_s390_vstrch: |
| case SystemZ::BI__builtin_s390_vstrcf: |
| case SystemZ::BI__builtin_s390_vstrczb: |
| case SystemZ::BI__builtin_s390_vstrczh: |
| case SystemZ::BI__builtin_s390_vstrczf: |
| case SystemZ::BI__builtin_s390_vstrcbs: |
| case SystemZ::BI__builtin_s390_vstrchs: |
| case SystemZ::BI__builtin_s390_vstrcfs: |
| case SystemZ::BI__builtin_s390_vstrczbs: |
| case SystemZ::BI__builtin_s390_vstrczhs: |
| case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(const char *). |
| /// This checks that the target supports __builtin_cpu_supports and |
| /// that the string argument is constant and valid. |
| static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { |
| Expr *Arg = TheCall->getArg(0); |
| |
| // Check if the argument is a string literal. |
| if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) |
| return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal) |
| << Arg->getSourceRange(); |
| |
| // Check the contents of the string. |
| StringRef Feature = |
| cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); |
| if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) |
| return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports) |
| << Arg->getSourceRange(); |
| return false; |
| } |
| |
| bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { |
| unsigned i = 0, l = 0, u = 0; |
| switch (BuiltinID) { |
| default: return false; |
| case X86::BI__builtin_cpu_supports: |
| return SemaBuiltinCpuSupports(*this, TheCall); |
| case X86::BI__builtin_ms_va_start: |
| return SemaBuiltinMSVAStart(TheCall); |
| case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break; |
| case X86::BI__builtin_ia32_sha1rnds4: i = 2; l = 0; u = 3; break; |
| case X86::BI__builtin_ia32_vpermil2pd: |
| case X86::BI__builtin_ia32_vpermil2pd256: |
| case X86::BI__builtin_ia32_vpermil2ps: |
| case X86::BI__builtin_ia32_vpermil2ps256: i = 3; l = 0; u = 3; break; |
| case X86::BI__builtin_ia32_cmpb128_mask: |
| case X86::BI__builtin_ia32_cmpw128_mask: |
| case X86::BI__builtin_ia32_cmpd128_mask: |
| case X86::BI__builtin_ia32_cmpq128_mask: |
| case X86::BI__builtin_ia32_cmpb256_mask: |
| case X86::BI__builtin_ia32_cmpw256_mask: |
| case X86::BI__builtin_ia32_cmpd256_mask: |
| case X86::BI__builtin_ia32_cmpq256_mask: |
| case X86::BI__builtin_ia32_cmpb512_mask: |
| case X86::BI__builtin_ia32_cmpw512_mask: |
| case X86::BI__builtin_ia32_cmpd512_mask: |
| case X86::BI__builtin_ia32_cmpq512_mask: |
| case X86::BI__builtin_ia32_ucmpb128_mask: |
| case X86::BI__builtin_ia32_ucmpw128_mask: |
| case X86::BI__builtin_ia32_ucmpd128_mask: |
| case X86::BI__builtin_ia32_ucmpq128_mask: |
| case X86::BI__builtin_ia32_ucmpb256_mask: |
| case X86::BI__builtin_ia32_ucmpw256_mask: |
| case X86::BI__builtin_ia32_ucmpd256_mask: |
| case X86::BI__builtin_ia32_ucmpq256_mask: |
| case X86::BI__builtin_ia32_ucmpb512_mask: |
| case X86::BI__builtin_ia32_ucmpw512_mask: |
| case X86::BI__builtin_ia32_ucmpd512_mask: |
| case X86::BI__builtin_ia32_ucmpq512_mask: i = 2; l = 0; u = 7; break; |
| case X86::BI__builtin_ia32_roundps: |
| case X86::BI__builtin_ia32_roundpd: |
| case X86::BI__builtin_ia32_roundps256: |
| case X86::BI__builtin_ia32_roundpd256: i = 1; l = 0; u = 15; break; |
| case X86::BI__builtin_ia32_roundss: |
| case X86::BI__builtin_ia32_roundsd: i = 2; l = 0; u = 15; break; |
| case X86::BI__builtin_ia32_cmpps: |
| case X86::BI__builtin_ia32_cmpss: |
| case X86::BI__builtin_ia32_cmppd: |
| case X86::BI__builtin_ia32_cmpsd: |
| case X86::BI__builtin_ia32_cmpps256: |
| case X86::BI__builtin_ia32_cmppd256: |
| case X86::BI__builtin_ia32_cmpps512_mask: |
| case X86::BI__builtin_ia32_cmppd512_mask: i = 2; l = 0; u = 31; break; |
| case X86::BI__builtin_ia32_vpcomub: |
| case X86::BI__builtin_ia32_vpcomuw: |
| case X86::BI__builtin_ia32_vpcomud: |
| case X86::BI__builtin_ia32_vpcomuq: |
| case X86::BI__builtin_ia32_vpcomb: |
| case X86::BI__builtin_ia32_vpcomw: |
| case X86::BI__builtin_ia32_vpcomd: |
| case X86::BI__builtin_ia32_vpcomq: i = 2; l = 0; u = 7; break; |
| } |
| return SemaBuiltinConstantArgRange(TheCall, i, l, u); |
| } |
| |
| /// Given a FunctionDecl's FormatAttr, attempts to populate the |
| /// FormatStringInfo parameter with the FormatAttr's correct format_idx and |
| /// firstDataArg. Returns true when the format fits the function and the |
| /// FormatStringInfo has been populated. |
| bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, |
| FormatStringInfo *FSI) { |
| FSI->HasVAListArg = Format->getFirstArg() == 0; |
| FSI->FormatIdx = Format->getFormatIdx() - 1; |
| FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; |
| |
| // The way the format attribute works in GCC, the implicit this argument |
| // of member functions is counted. However, it doesn't appear in our own |
| // lists, so decrement format_idx in that case. |
| if (IsCXXMember) { |
| if (FSI->FormatIdx == 0) |
| return false; |
| --FSI->FormatIdx; |
| if (FSI->FirstDataArg != 0) |
| --FSI->FirstDataArg; |
| } |
| return true; |
| } |
| |
| /// \brief Checks whether the given expression evaluates to null. |
| /// |
| /// Returns true if the value evaluates to null. |
| static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { |
| // If the expression has non-null type, it doesn't evaluate to null. |
| if (auto nullability |
| = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { |
| if (*nullability == NullabilityKind::NonNull) |
| return false; |
| } |
| |
| // As a special case, transparent unions initialized with zero are |
| // considered null for the purposes of the nonnull attribute. |
| if (const RecordType *UT = Expr->getType()->getAsUnionType()) { |
| if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) |
| if (const CompoundLiteralExpr *CLE = |
| dyn_cast<CompoundLiteralExpr>(Expr)) |
| if (const InitListExpr *ILE = |
| dyn_cast<InitListExpr>(CLE->getInitializer())) |
| Expr = ILE->getInit(0); |
| } |
| |
| bool Result; |
| return (!Expr->isValueDependent() && |
| Expr->EvaluateAsBooleanCondition(Result, S.Context) && |
| !Result); |
| } |
| |
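| /// Warn when an argument that is known to evaluate to null is passed where |
| /// a non-null value is required. |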
| static void CheckNonNullArgument(Sema &S, |
| const Expr *ArgExpr, |
| SourceLocation CallSiteLoc) { |
| if (CheckNonNullExpr(S, ArgExpr)) |
| S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, |
| S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange()); |
| } |
| |
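| /// If \p Format describes an NSString format function, return true and set |
| /// \p Idx to the index of its format-string argument. |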
| bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { |
| FormatStringInfo FSI; |
| if ((GetFormatStringType(Format) == FST_NSString) && |
| getFormatStringInfo(Format, false, &FSI)) { |
| Idx = FSI.FormatIdx; |
| return true; |
| } |
| return false; |
| } |
| |
| /// \brief Diagnose the use of a %s directive in an NSString that is being |
| /// passed as the format string to a formatting method. |
| static void |
| DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, |
| const NamedDecl *FDecl, |
| Expr **Args, |
| unsigned NumArgs) { |
| unsigned Idx = 0; |
| bool Format = false; |
| ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); |
| if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { |
| Idx = 2; |
| Format = true; |
| } |
| else |
| for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { |
| if (S.GetFormatNSStringIdx(I, Idx)) { |
| Format = true; |
| break; |
| } |
| } |
| if (!Format || NumArgs <= Idx) |
| return; |
| const Expr *FormatExpr = Args[Idx]; |
| if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) |
| FormatExpr = CSCE->getSubExpr(); |
| const StringLiteral *FormatString; |
| if (const ObjCStringLiteral *OSL = |
| dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) |
| FormatString = OSL->getString(); |
| else |
| FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); |
| if (!FormatString) |
| return; |
| if (S.FormatStringHasSArg(FormatString)) { |
| S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) |
| << "%s" << 1 << 1; |
| S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) |
| << FDecl->getDeclName(); |
| } |
| } |
| |
| /// Determine whether the given type has a non-null nullability annotation. |
| static bool isNonNullType(ASTContext &ctx, QualType type) { |
| if (auto nullability = type->getNullability(ctx)) |
| return *nullability == NullabilityKind::NonNull; |
| |
| return false; |
| } |
| |
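| /// Determine which arguments must be non-null, from nonnull attributes on |
| /// the callee and nullability annotations on its parameters, and diagnose |
| /// any argument that is known to be null. |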
| static void CheckNonNullArguments(Sema &S, |
| const NamedDecl *FDecl, |
| const FunctionProtoType *Proto, |
| ArrayRef<const Expr *> Args, |
| SourceLocation CallSiteLoc) { |
| assert((FDecl || Proto) && "Need a function declaration or prototype"); |
| |
| // Check the attributes attached to the method/function itself. |
| llvm::SmallBitVector NonNullArgs; |
| if (FDecl) { |
| // Handle the nonnull attribute on the function/method declaration itself. |
| for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { |
| if (!NonNull->args_size()) { |
| // Easy case: all pointer arguments are nonnull. |
| for (const auto *Arg : Args) |
| if (S.isValidPointerAttrType(Arg->getType())) |
| CheckNonNullArgument(S, Arg, CallSiteLoc); |
| return; |
| } |
| |
| for (unsigned Val : NonNull->args()) { |
| if (Val >= Args.size()) |
| continue; |
| if (NonNullArgs.empty()) |
| NonNullArgs.resize(Args.size()); |
| NonNullArgs.set(Val); |
| } |
| } |
| } |
| |
| if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { |
| // Handle the nonnull attribute on the parameters of the |
| // function/method. |
| ArrayRef<ParmVarDecl*> parms; |
| if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) |
| parms = FD->parameters(); |
| else |
| parms = cast<ObjCMethodDecl>(FDecl)->parameters(); |
| |
| unsigned ParamIndex = 0; |
| for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); |
| I != E; ++I, ++ParamIndex) { |
| const ParmVarDecl *PVD = *I; |
| if (PVD->hasAttr<NonNullAttr>() || |
| isNonNullType(S.Context, PVD->getType())) { |
| if (NonNullArgs.empty()) |
| NonNullArgs.resize(Args.size()); |
| |
| NonNullArgs.set(ParamIndex); |
| } |
| } |
| } else { |
| // If we have a non-function, non-method declaration but no |
| // function prototype, try to dig out the function prototype. |
| if (!Proto) { |
| if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { |
| QualType type = VD->getType().getNonReferenceType(); |
| if (auto pointerType = type->getAs<PointerType>()) |
| type = pointerType->getPointeeType(); |
| else if (auto blockType = type->getAs<BlockPointerType>()) |
| type = blockType->getPointeeType(); |
| // FIXME: data member pointers? |
| |
| // Dig out the function prototype, if there is one. |
| Proto = type->getAs<FunctionProtoType>(); |
| } |
| } |
| |
| // Fill in non-null argument information from the nullability |
| // information on the parameter types (if we have them). |
| if (Proto) { |
| unsigned Index = 0; |
| for (auto paramType : Proto->getParamTypes()) { |
| if (isNonNullType(S.Context, paramType)) { |
| if (NonNullArgs.empty()) |
| NonNullArgs.resize(Args.size()); |
| |
| NonNullArgs.set(Index); |
| } |
| |
| ++Index; |
| } |
| } |
| } |
| |
| // Check for non-null arguments. |
| for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); |
| ArgIndex != ArgIndexEnd; ++ArgIndex) { |
| if (NonNullArgs[ArgIndex]) |
| CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); |
| } |
| } |
| |
| /// Handles the checks for format strings, non-POD arguments to vararg |
| /// functions, and NULL arguments passed to non-NULL parameters. |
| void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, |
| ArrayRef<const Expr *> Args, bool IsMemberFunction, |
| SourceLocation Loc, SourceRange Range, |
| VariadicCallType CallType) { |
| // FIXME: We should check as much as we can in the template definition. |
| if (CurContext->isDependentContext()) |
| return; |
| |
| // Printf and scanf checking. |
| llvm::SmallBitVector CheckedVarArgs; |
| if (FDecl) { |
| for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { |
| // Only create vector if there are format attributes. |
| CheckedVarArgs.resize(Args.size()); |
| |
| CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, |
| CheckedVarArgs); |
| } |
| } |
| |
| // Diagnose non-POD arguments passed to a variadic function when they were |
| // not already covered by the format string checks above. |
| if (CallType != VariadicDoesNotApply) { |
| unsigned NumParams = Proto ? Proto->getNumParams() |
| : FDecl && isa<FunctionDecl>(FDecl) |
| ? cast<FunctionDecl>(FDecl)->getNumParams() |
| : FDecl && isa<ObjCMethodDecl>(FDecl) |
| ? cast<ObjCMethodDecl>(FDecl)->param_size() |
| : 0; |
| |
| for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { |
| // Args[ArgIdx] can be null in malformed code. |
| if (const Expr *Arg = Args[ArgIdx]) { |
| if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) |
| checkVariadicArgument(Arg, CallType); |
| } |
| } |
| } |
| |
| if (FDecl || Proto) { |
| CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); |
| |
| // Type safety checking. |
| if (FDecl) { |
| for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) |
| CheckArgumentWithTypeTag(I, Args.data()); |
| } |
| } |
| } |
| |
| /// CheckConstructorCall - Check a constructor call for correctness and safety |
| /// properties not enforced by the C type system. |
| void Sema::CheckConstructorCall(FunctionDecl *FDecl, |
| ArrayRef<const Expr *> Args, |
| const FunctionProtoType *Proto, |
| SourceLocation Loc) { |
| VariadicCallType CallType = |
| Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; |
| checkCall(FDecl, Proto, Args, /*IsMemberFunction=*/true, Loc, SourceRange(), |
| CallType); |
| } |
| |
| /// CheckFunctionCall - Check a direct function call for various correctness |
| /// and safety properties not strictly enforced by the C type system. |
| bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, |
| const FunctionProtoType *Proto) { |
| bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && |
| isa<CXXMethodDecl>(FDecl); |
| bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || |
| IsMemberOperatorCall; |
| VariadicCallType CallType = getVariadicCallType(FDecl, Proto, |
| TheCall->getCallee()); |
| Expr **Args = TheCall->getArgs(); |
| unsigned NumArgs = TheCall->getNumArgs(); |
| if (IsMemberOperatorCall) { |
| // If this is a call to a member operator, hide the first argument |
| // from checkCall. |
| // FIXME: Our choice of AST representation here is less than ideal. |
| ++Args; |
| --NumArgs; |
| } |
| checkCall(FDecl, Proto, llvm::makeArrayRef(Args, NumArgs), |
| IsMemberFunction, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| IdentifierInfo *FnInfo = FDecl->getIdentifier(); |
| // None of the checks below are needed for functions that don't have |
| // simple names (e.g., C++ conversion functions). |
| if (!FnInfo) |
| return false; |
| |
| CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo); |
| if (getLangOpts().ObjC1) |
| DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); |
| |
| unsigned CMId = FDecl->getMemoryFunctionKind(); |
| if (CMId == 0) |
| return false; |
| |
| // Handle memory setting and copying functions. |
| if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) |
| CheckStrlcpycatArguments(TheCall, FnInfo); |
| else if (CMId == Builtin::BIstrncat) |
| CheckStrncatArguments(TheCall, FnInfo); |
| else |
| CheckMemaccessArguments(TheCall, CMId, FnInfo); |
| |
| return false; |
| } |
| |
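| /// Check an Objective-C message send for correctness and safety properties |
| /// not enforced by the type system. |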
| bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, |
| ArrayRef<const Expr *> Args) { |
| VariadicCallType CallType = |
| Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; |
| |
| checkCall(Method, nullptr, Args, |
| /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), |
| CallType); |
| |
| return false; |
| } |
| |
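| /// Check an indirect call made through a variable or field of |
| /// function-pointer or block-pointer type. |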
| bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, |
| const FunctionProtoType *Proto) { |
| QualType Ty; |
| if (const auto *V = dyn_cast<VarDecl>(NDecl)) |
| Ty = V->getType().getNonReferenceType(); |
| else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) |
| Ty = F->getType().getNonReferenceType(); |
| else |
| return false; |
| |
| if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && |
| !Ty->isFunctionProtoType()) |
| return false; |
| |
| VariadicCallType CallType; |
| if (!Proto || !Proto->isVariadic()) { |
| CallType = VariadicDoesNotApply; |
| } else if (Ty->isBlockPointerType()) { |
| CallType = VariadicBlock; |
| } else { // Ty->isFunctionPointerType() |
| CallType = VariadicFunction; |
| } |
| |
| checkCall(NDecl, Proto, |
| llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), |
| /*IsMemberFunction=*/false, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| return false; |
| } |
| |
| /// Checks function calls when a FunctionDecl or a NamedDecl is not available, |
| /// such as function pointers returned from functions. |
| bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { |
| VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, |
| TheCall->getCallee()); |
| checkCall(/*FDecl=*/nullptr, Proto, |
| llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), |
| /*IsMemberFunction=*/false, TheCall->getRParenLoc(), |
| TheCall->getCallee()->getSourceRange(), CallType); |
| |
| return false; |
| } |
| |
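| /// Returns true if the given memory ordering constant is valid for the |
| /// given atomic operation; for example, a load may not use 'release' or |
| /// 'acq_rel' ordering, and a store may not use 'consume', 'acquire', or |
| /// 'acq_rel'. |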
| static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { |
| if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed || |
| Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst) |
| return false; |
| |
| switch (Op) { |
| case AtomicExpr::AO__c11_atomic_init: |
| llvm_unreachable("There is no ordering argument for an init"); |
| |
| case AtomicExpr::AO__c11_atomic_load: |
| case AtomicExpr::AO__atomic_load_n: |
| case AtomicExpr::AO__atomic_load: |
| return Ordering != AtomicExpr::AO_ABI_memory_order_release && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel; |
| |
| case AtomicExpr::AO__c11_atomic_store: |
| case AtomicExpr::AO__atomic_store: |
| case AtomicExpr::AO__atomic_store_n: |
| return Ordering != AtomicExpr::AO_ABI_memory_order_consume && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acquire && |
| Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel; |
| |
| default: |
| return true; |
| } |
| } |
| |
| ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, |
| AtomicExpr::AtomicOp Op) { |
| CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| |
| // All these operations take one of the following forms: |
| enum { |
| // C __c11_atomic_init(A *, C) |
| Init, |
| // C __c11_atomic_load(A *, int) |
| Load, |
| // void __atomic_load(A *, CP, int) |
| Copy, |
| // C __c11_atomic_add(A *, M, int) |
| Arithmetic, |
| // C __atomic_exchange_n(A *, CP, int) |
| Xchg, |
| // void __atomic_exchange(A *, C *, CP, int) |
| GNUXchg, |
| // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) |
| C11CmpXchg, |
| // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) |
| GNUCmpXchg |
| } Form = Init; |
| const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 }; |
| const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 }; |
| // where: |
| // C is an appropriate type, |
| // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, |
| // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, |
| // M is C if C is an integer, and ptrdiff_t if C is a pointer, and |
| // the int parameters are for orderings. |
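| // |
| // For example (illustrative), given '_Atomic(int) *p', the call |
| //   __c11_atomic_fetch_add(p, 1, memory_order_relaxed) |
| // has the Arithmetic form: C and M are 'int', A is 'volatile _Atomic(int)', |
| // and the trailing int argument is the memory ordering. |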
| |
| static_assert(AtomicExpr::AO__c11_atomic_init == 0 && |
| AtomicExpr::AO__c11_atomic_fetch_xor + 1 == |
| AtomicExpr::AO__atomic_load, |
| "need to update code for modified C11 atomics"); |
| bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init && |
| Op <= AtomicExpr::AO__c11_atomic_fetch_xor; |
| bool IsN = Op == AtomicExpr::AO__atomic_load_n || |
| Op == AtomicExpr::AO__atomic_store_n || |
| Op == AtomicExpr::AO__atomic_exchange_n || |
| Op == AtomicExpr::AO__atomic_compare_exchange_n; |
| bool IsAddSub = false; |
| |
| switch (Op) { |
| case AtomicExpr::AO__c11_atomic_init: |
| Form = Init; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_load: |
| case AtomicExpr::AO__atomic_load_n: |
| Form = Load; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_store: |
| case AtomicExpr::AO__atomic_load: |
| case AtomicExpr::AO__atomic_store: |
| case AtomicExpr::AO__atomic_store_n: |
| Form = Copy; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_fetch_add: |
| case AtomicExpr::AO__c11_atomic_fetch_sub: |
| case AtomicExpr::AO__atomic_fetch_add: |
| case AtomicExpr::AO__atomic_fetch_sub: |
| case AtomicExpr::AO__atomic_add_fetch: |
| case AtomicExpr::AO__atomic_sub_fetch: |
| IsAddSub = true; |
| // Fall through. |
| case AtomicExpr::AO__c11_atomic_fetch_and: |
| case AtomicExpr::AO__c11_atomic_fetch_or: |
| case AtomicExpr::AO__c11_atomic_fetch_xor: |
| case AtomicExpr::AO__atomic_fetch_and: |
| case AtomicExpr::AO__atomic_fetch_or: |
| case AtomicExpr::AO__atomic_fetch_xor: |
| case AtomicExpr::AO__atomic_fetch_nand: |
| case AtomicExpr::AO__atomic_and_fetch: |
| case AtomicExpr::AO__atomic_or_fetch: |
| case AtomicExpr::AO__atomic_xor_fetch: |
| case AtomicExpr::AO__atomic_nand_fetch: |
| Form = Arithmetic; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_exchange: |
| case AtomicExpr::AO__atomic_exchange_n: |
| Form = Xchg; |
| break; |
| |
| case AtomicExpr::AO__atomic_exchange: |
| Form = GNUXchg; |
| break; |
| |
| case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
| case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
| Form = C11CmpXchg; |
| break; |
| |
| case AtomicExpr::AO__atomic_compare_exchange: |
| case AtomicExpr::AO__atomic_compare_exchange_n: |
| Form = GNUCmpXchg; |
| break; |
| } |
| |
| // Check we have the right number of arguments. |
| if (TheCall->getNumArgs() < NumArgs[Form]) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) |
| << 0 << NumArgs[Form] << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } else if (TheCall->getNumArgs() > NumArgs[Form]) { |
| Diag(TheCall->getArg(NumArgs[Form])->getLocStart(), |
| diag::err_typecheck_call_too_many_args) |
| << 0 << NumArgs[Form] << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Inspect the first argument of the atomic operation. |
| Expr *Ptr = TheCall->getArg(0); |
| Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get(); |
| const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // For a __c11 builtin, this should be a pointer to an _Atomic type. |
| QualType AtomTy = pointerType->getPointeeType(); // 'A' |
| QualType ValType = AtomTy; // 'C' |
| if (IsC11) { |
| if (!AtomTy->isAtomicType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| if (AtomTy.isConstQualified()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| ValType = AtomTy->getAs<AtomicType>()->getValueType(); |
| } else if (Form != Load && Op != AtomicExpr::AO__atomic_load) { |
| if (ValType.isConstQualified()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| } |
| |
| // For an arithmetic operation, the implied arithmetic must be well-formed. |
| if (Form == Arithmetic) { |
| // GCC does not enforce these rules for GNU atomics, but we do so for sanity. |
| if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| if (!IsAddSub && !ValType->isIntegerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| if (IsC11 && ValType->isPointerType() && |
| RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(), |
| diag::err_incomplete_type)) { |
| return ExprError(); |
| } |
| } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { |
| // For __atomic_*_n operations, the value type must be a scalar integral or |
| // pointer type which is 1, 2, 4, 8 or 16 bytes in length. |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) |
| << IsC11 << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && |
| !AtomTy->isScalarType()) { |
| // For GNU atomics, require a trivially-copyable type. This is not part of |
| // the GNU atomics specification, but we enforce it for sanity. |
| Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy) |
| << Ptr->getType() << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| // FIXME: Can this happen? By this point, ValType should be known |
| // to be trivially copyable. |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << Ptr->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // atomic_fetch_or takes a pointer to a volatile 'A'. We shouldn't let the |
| // volatile-ness of the pointee-type inject itself into the result or the |
| // other operands. |
| ValType.removeLocalVolatile(); |
| QualType ResultType = ValType; |
| if (Form == Copy || Form == GNUXchg || Form == Init) |
| ResultType = Context.VoidTy; |
| else if (Form == C11CmpXchg || Form == GNUCmpXchg) |
| ResultType = Context.BoolTy; |
| |
| // The type of a parameter passed 'by value'. In the GNU atomics, such |
| // arguments are actually passed as pointers. |
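| // (For example, for the GNU __atomic_exchange builtin, 'CP' in the forms |
| // above is 'C *', so the "by value" operand actually arrives as a pointer.) |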
| QualType ByValType = ValType; // 'CP' |
| if (!IsC11 && !IsN) |
| ByValType = Ptr->getType(); |
| |
| // FIXME: __atomic_load allows the first argument to be a pointer to const |
| // but not the second argument. We need to manually remove possible const |
| // qualifiers. |
| |
| // The first argument --- the pointer --- has a fixed type; we |
| // deduce the types of the rest of the arguments accordingly. Walk |
| // the remaining arguments, converting them to the deduced value type. |
| for (unsigned i = 1; i != NumArgs[Form]; ++i) { |
| QualType Ty; |
| if (i < NumVals[Form] + 1) { |
| switch (i) { |
| case 1: |
| // The second argument is the non-atomic operand. For arithmetic, this |
| // is always passed by value, and for a compare_exchange it is always |
| // passed by address. For the rest, GNU uses by-address and C11 uses |
| // by-value. |
| assert(Form != Load); |
| if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) |
| Ty = ValType; |
| else if (Form == Copy || Form == Xchg) |
| Ty = ByValType; |
| else if (Form == Arithmetic) |
| Ty = Context.getPointerDiffType(); |
| else { |
| Expr *ValArg = TheCall->getArg(i); |
| unsigned AS = 0; |
| // Keep address space of non-atomic pointer type. |
| if (const PointerType *PtrTy = |
| ValArg->getType()->getAs<PointerType>()) { |
| AS = PtrTy->getPointeeType().getAddressSpace(); |
| } |
| Ty = Context.getPointerType( |
| Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); |
| } |
| break; |
| case 2: |
| // The third argument to compare_exchange / GNU exchange is a |
| // (pointer to a) desired value. |
| Ty = ByValType; |
| break; |
| case 3: |
| // The fourth argument to GNU compare_exchange is a 'weak' flag. |
| Ty = Context.BoolTy; |
| break; |
| } |
| } else { |
| // The order(s) are always converted to int. |
| Ty = Context.IntTy; |
| } |
| |
| InitializedEntity Entity = |
| InitializedEntity::InitializeParameter(Context, Ty, false); |
| ExprResult Arg = TheCall->getArg(i); |
| Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return ExprError(); |
| TheCall->setArg(i, Arg.get()); |
| } |
| |
| // Permute the arguments into a 'consistent' order. |
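| // That order is Ptr, Order, Val1, OrderFail, Val2, Weak, omitting trailing |
| // operands a given form does not use (Init and GNUXchg are special-cased; |
| // see the notes below). |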
| SmallVector<Expr*, 5> SubExprs; |
| SubExprs.push_back(Ptr); |
| switch (Form) { |
| case Init: |
| // Note, AtomicExpr::getVal1() has a special case for this atomic. |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| break; |
| case Load: |
| SubExprs.push_back(TheCall->getArg(1)); // Order |
| break; |
| case Copy: |
| case Arithmetic: |
| case Xchg: |
| SubExprs.push_back(TheCall->getArg(2)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| break; |
| case GNUXchg: |
| // Note, AtomicExpr::getVal2() has a special case for this atomic. |
| SubExprs.push_back(TheCall->getArg(3)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| break; |
| case C11CmpXchg: |
| SubExprs.push_back(TheCall->getArg(3)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(4)); // OrderFail |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| break; |
| case GNUCmpXchg: |
| SubExprs.push_back(TheCall->getArg(4)); // Order |
| SubExprs.push_back(TheCall->getArg(1)); // Val1 |
| SubExprs.push_back(TheCall->getArg(5)); // OrderFail |
| SubExprs.push_back(TheCall->getArg(2)); // Val2 |
| SubExprs.push_back(TheCall->getArg(3)); // Weak |
| break; |
| } |
| |
| if (SubExprs.size() >= 2 && Form != Init) { |
| llvm::APSInt Result(32); |
| if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && |
| !isValidOrderingForOp(Result.getSExtValue(), Op)) |
| Diag(SubExprs[1]->getLocStart(), |
| diag::warn_atomic_op_has_invalid_memory_order) |
| << SubExprs[1]->getSourceRange(); |
| } |
| |
| AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(), |
| SubExprs, ResultType, Op, |
| TheCall->getRParenLoc()); |
| |
| if ((Op == AtomicExpr::AO__c11_atomic_load || |
| Op == AtomicExpr::AO__c11_atomic_store) && |
| Context.AtomicUsesUnsupportedLibcall(AE)) |
| Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib) |
| << ((Op == AtomicExpr::AO__c11_atomic_load) ? 0 : 1); |
| |
| return AE; |
| } |
| |
| /// checkBuiltinArgument - Given a call to a builtin function, perform |
| /// normal type-checking on the given argument, updating the call in |
| /// place. This is useful when a builtin function requires custom |
| /// type-checking for some of its arguments but not necessarily all of |
| /// them. |
| /// |
| /// Returns true on error. |
| static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { |
| FunctionDecl *Fn = E->getDirectCallee(); |
| assert(Fn && "builtin call without direct callee!"); |
| |
| ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); |
| InitializedEntity Entity = |
| InitializedEntity::InitializeParameter(S.Context, Param); |
| |
| ExprResult Arg = E->getArg(ArgIndex); |
| Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return true; |
| |
| E->setArg(ArgIndex, Arg.get()); |
| return false; |
| } |
| |
| /// SemaBuiltinAtomicOverloaded - We have a call to a function like |
| /// __sync_fetch_and_add, which is an overloaded function based on the pointer |
| /// type of its first argument. The main ActOnCallExpr routines have already |
| /// promoted the types of arguments because all of these calls are prototyped as |
| /// void(...). |
| /// |
| /// This function goes through and does final semantic checking for these |
| /// builtins. |
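| /// |
| /// For example, on a target where 'int' is 4 bytes wide, a call to |
| /// __sync_fetch_and_add through an 'int *' first argument is rewritten to |
| /// call the concrete builtin __sync_fetch_and_add_4. |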
| ExprResult |
| Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { |
| CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); |
| DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); |
| |
| // Ensure that we have at least one argument to do type inference from. |
| if (TheCall->getNumArgs() < 1) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) |
| << 0 << 1 << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Inspect the first argument of the atomic builtin. This should always be |
| // a pointer type, whose element is an integral scalar or pointer type. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| // FIXME: We don't allow floating point scalars as input. |
| Expr *FirstArg = TheCall->getArg(0); |
| ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); |
| if (FirstArgResult.isInvalid()) |
| return ExprError(); |
| FirstArg = FirstArgResult.get(); |
| TheCall->setArg(0, FirstArg); |
| |
| const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| QualType ValType = pointerType->getPointeeType(); |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType()) { |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| switch (ValType.getObjCLifetime()) { |
| case Qualifiers::OCL_None: |
| case Qualifiers::OCL_ExplicitNone: |
| // okay |
| break; |
| |
| case Qualifiers::OCL_Weak: |
| case Qualifiers::OCL_Strong: |
| case Qualifiers::OCL_Autoreleasing: |
| Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) |
| << ValType << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Strip any qualifiers off ValType. |
| ValType = ValType.getUnqualifiedType(); |
| |
| // The majority of builtins return a value, but a few have special return |
| // types, so allow them to override appropriately below. |
| QualType ResultType = ValType; |
| |
| // We need to figure out which concrete builtin this maps onto. For example, |
| // __sync_fetch_and_add with a 2 byte object turns into |
| // __sync_fetch_and_add_2. |
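| // The table below is indexed first by operation (BuiltinIndex, chosen in |
| // the switch further down) and then by object size (SizeIndex). |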
| #define BUILTIN_ROW(x) \ |
| { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ |
| Builtin::BI##x##_8, Builtin::BI##x##_16 } |
| |
| static const unsigned BuiltinIndices[][5] = { |
| BUILTIN_ROW(__sync_fetch_and_add), |
| BUILTIN_ROW(__sync_fetch_and_sub), |
| BUILTIN_ROW(__sync_fetch_and_or), |
| BUILTIN_ROW(__sync_fetch_and_and), |
| BUILTIN_ROW(__sync_fetch_and_xor), |
| BUILTIN_ROW(__sync_fetch_and_nand), |
| |
| BUILTIN_ROW(__sync_add_and_fetch), |
| BUILTIN_ROW(__sync_sub_and_fetch), |
| BUILTIN_ROW(__sync_and_and_fetch), |
| BUILTIN_ROW(__sync_or_and_fetch), |
| BUILTIN_ROW(__sync_xor_and_fetch), |
| BUILTIN_ROW(__sync_nand_and_fetch), |
| |
| BUILTIN_ROW(__sync_val_compare_and_swap), |
| BUILTIN_ROW(__sync_bool_compare_and_swap), |
| BUILTIN_ROW(__sync_lock_test_and_set), |
| BUILTIN_ROW(__sync_lock_release), |
| BUILTIN_ROW(__sync_swap) |
| }; |
| #undef BUILTIN_ROW |
| |
| // Determine the index of the size. |
| unsigned SizeIndex; |
| switch (Context.getTypeSizeInChars(ValType).getQuantity()) { |
| case 1: SizeIndex = 0; break; |
| case 2: SizeIndex = 1; break; |
| case 4: SizeIndex = 2; break; |
| case 8: SizeIndex = 3; break; |
| case 16: SizeIndex = 4; break; |
| default: |
| Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) |
| << FirstArg->getType() << FirstArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| // Each of these builtins has one pointer argument, followed by some number |
| // of values (0, 1 or 2), followed by a potentially empty varargs list of |
| // stuff that we ignore.  Find out which row of BuiltinIndices to read from |
| // as well as the number of fixed args. |
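| // For example, __sync_val_compare_and_swap has two fixed value arguments |
| // (NumFixed == 2), while __sync_lock_release has none. |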
| unsigned BuiltinID = FDecl->getBuiltinID(); |
| unsigned BuiltinIndex, NumFixed = 1; |
| bool WarnAboutSemanticsChange = false; |
| switch (BuiltinID) { |
| default: llvm_unreachable("Unknown overloaded atomic builtin!"); |
| case Builtin::BI__sync_fetch_and_add: |
| case Builtin::BI__sync_fetch_and_add_1: |
| case Builtin::BI__sync_fetch_and_add_2: |
| case Builtin::BI__sync_fetch_and_add_4: |
| case Builtin::BI__sync_fetch_and_add_8: |
| case Builtin::BI__sync_fetch_and_add_16: |
| BuiltinIndex = 0; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_sub: |
| case Builtin::BI__sync_fetch_and_sub_1: |
| case Builtin::BI__sync_fetch_and_sub_2: |
| case Builtin::BI__sync_fetch_and_sub_4: |
| case Builtin::BI__sync_fetch_and_sub_8: |
| case Builtin::BI__sync_fetch_and_sub_16: |
| BuiltinIndex = 1; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_or: |
| case Builtin::BI__sync_fetch_and_or_1: |
| case Builtin::BI__sync_fetch_and_or_2: |
| case Builtin::BI__sync_fetch_and_or_4: |
| case Builtin::BI__sync_fetch_and_or_8: |
| case Builtin::BI__sync_fetch_and_or_16: |
| BuiltinIndex = 2; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_and: |
| case Builtin::BI__sync_fetch_and_and_1: |
| case Builtin::BI__sync_fetch_and_and_2: |
| case Builtin::BI__sync_fetch_and_and_4: |
| case Builtin::BI__sync_fetch_and_and_8: |
| case Builtin::BI__sync_fetch_and_and_16: |
| BuiltinIndex = 3; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_xor: |
| case Builtin::BI__sync_fetch_and_xor_1: |
| case Builtin::BI__sync_fetch_and_xor_2: |
| case Builtin::BI__sync_fetch_and_xor_4: |
| case Builtin::BI__sync_fetch_and_xor_8: |
| case Builtin::BI__sync_fetch_and_xor_16: |
| BuiltinIndex = 4; |
| break; |
| |
| case Builtin::BI__sync_fetch_and_nand: |
| case Builtin::BI__sync_fetch_and_nand_1: |
| case Builtin::BI__sync_fetch_and_nand_2: |
| case Builtin::BI__sync_fetch_and_nand_4: |
| case Builtin::BI__sync_fetch_and_nand_8: |
| case Builtin::BI__sync_fetch_and_nand_16: |
| BuiltinIndex = 5; |
| WarnAboutSemanticsChange = true; |
| break; |
| |
| case Builtin::BI__sync_add_and_fetch: |
| case Builtin::BI__sync_add_and_fetch_1: |
| case Builtin::BI__sync_add_and_fetch_2: |
| case Builtin::BI__sync_add_and_fetch_4: |
| case Builtin::BI__sync_add_and_fetch_8: |
| case Builtin::BI__sync_add_and_fetch_16: |
| BuiltinIndex = 6; |
| break; |
| |
| case Builtin::BI__sync_sub_and_fetch: |
| case Builtin::BI__sync_sub_and_fetch_1: |
| case Builtin::BI__sync_sub_and_fetch_2: |
| case Builtin::BI__sync_sub_and_fetch_4: |
| case Builtin::BI__sync_sub_and_fetch_8: |
| case Builtin::BI__sync_sub_and_fetch_16: |
| BuiltinIndex = 7; |
| break; |
| |
| case Builtin::BI__sync_and_and_fetch: |
| case Builtin::BI__sync_and_and_fetch_1: |
| case Builtin::BI__sync_and_and_fetch_2: |
| case Builtin::BI__sync_and_and_fetch_4: |
| case Builtin::BI__sync_and_and_fetch_8: |
| case Builtin::BI__sync_and_and_fetch_16: |
| BuiltinIndex = 8; |
| break; |
| |
| case Builtin::BI__sync_or_and_fetch: |
| case Builtin::BI__sync_or_and_fetch_1: |
| case Builtin::BI__sync_or_and_fetch_2: |
| case Builtin::BI__sync_or_and_fetch_4: |
| case Builtin::BI__sync_or_and_fetch_8: |
| case Builtin::BI__sync_or_and_fetch_16: |
| BuiltinIndex = 9; |
| break; |
| |
| case Builtin::BI__sync_xor_and_fetch: |
| case Builtin::BI__sync_xor_and_fetch_1: |
| case Builtin::BI__sync_xor_and_fetch_2: |
| case Builtin::BI__sync_xor_and_fetch_4: |
| case Builtin::BI__sync_xor_and_fetch_8: |
| case Builtin::BI__sync_xor_and_fetch_16: |
| BuiltinIndex = 10; |
| break; |
| |
| case Builtin::BI__sync_nand_and_fetch: |
| case Builtin::BI__sync_nand_and_fetch_1: |
| case Builtin::BI__sync_nand_and_fetch_2: |
| case Builtin::BI__sync_nand_and_fetch_4: |
| case Builtin::BI__sync_nand_and_fetch_8: |
| case Builtin::BI__sync_nand_and_fetch_16: |
| BuiltinIndex = 11; |
| WarnAboutSemanticsChange = true; |
| break; |
| |
| case Builtin::BI__sync_val_compare_and_swap: |
| case Builtin::BI__sync_val_compare_and_swap_1: |
| case Builtin::BI__sync_val_compare_and_swap_2: |
| case Builtin::BI__sync_val_compare_and_swap_4: |
| case Builtin::BI__sync_val_compare_and_swap_8: |
| case Builtin::BI__sync_val_compare_and_swap_16: |
| BuiltinIndex = 12; |
| NumFixed = 2; |
| break; |
| |
| case Builtin::BI__sync_bool_compare_and_swap: |
| case Builtin::BI__sync_bool_compare_and_swap_1: |
| case Builtin::BI__sync_bool_compare_and_swap_2: |
| case Builtin::BI__sync_bool_compare_and_swap_4: |
| case Builtin::BI__sync_bool_compare_and_swap_8: |
| case Builtin::BI__sync_bool_compare_and_swap_16: |
| BuiltinIndex = 13; |
| NumFixed = 2; |
| ResultType = Context.BoolTy; |
| break; |
| |
| case Builtin::BI__sync_lock_test_and_set: |
| case Builtin::BI__sync_lock_test_and_set_1: |
| case Builtin::BI__sync_lock_test_and_set_2: |
| case Builtin::BI__sync_lock_test_and_set_4: |
| case Builtin::BI__sync_lock_test_and_set_8: |
| case Builtin::BI__sync_lock_test_and_set_16: |
| BuiltinIndex = 14; |
| break; |
| |
| case Builtin::BI__sync_lock_release: |
| case Builtin::BI__sync_lock_release_1: |
| case Builtin::BI__sync_lock_release_2: |
| case Builtin::BI__sync_lock_release_4: |
| case Builtin::BI__sync_lock_release_8: |
| case Builtin::BI__sync_lock_release_16: |
| BuiltinIndex = 15; |
| NumFixed = 0; |
| ResultType = Context.VoidTy; |
| break; |
| |
| case Builtin::BI__sync_swap: |
| case Builtin::BI__sync_swap_1: |
| case Builtin::BI__sync_swap_2: |
| case Builtin::BI__sync_swap_4: |
| case Builtin::BI__sync_swap_8: |
| case Builtin::BI__sync_swap_16: |
| BuiltinIndex = 16; |
| break; |
| } |
| |
| // Now that we know how many fixed arguments we expect, first check that we |
| // have at least that many. |
| if (TheCall->getNumArgs() < 1+NumFixed) { |
| Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) |
| << 0 << 1+NumFixed << TheCall->getNumArgs() |
| << TheCall->getCallee()->getSourceRange(); |
| return ExprError(); |
| } |
| |
| if (WarnAboutSemanticsChange) { |
| Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change) |
| << TheCall->getCallee()->getSourceRange(); |
| } |
| |
| // Get the decl for the concrete builtin; from this we can tell what |
| // concrete integer type we should convert to. |
| unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; |
| const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); |
| FunctionDecl *NewBuiltinDecl; |
| if (NewBuiltinID == BuiltinID) |
| NewBuiltinDecl = FDecl; |
| else { |
| // Perform builtin lookup to avoid redeclaring it. |
| DeclarationName DN(&Context.Idents.get(NewBuiltinName)); |
| LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName); |
| LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); |
| assert(Res.getFoundDecl()); |
| NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); |
| if (!NewBuiltinDecl) |
| return ExprError(); |
| } |
| |
| // The first argument --- the pointer --- has a fixed type; we |
| // deduce the types of the rest of the arguments accordingly. Walk |
| // the remaining arguments, converting them to the deduced value type. |
| for (unsigned i = 0; i != NumFixed; ++i) { |
| ExprResult Arg = TheCall->getArg(i+1); |
| |
| // GCC does an implicit conversion to the pointer or integer ValType. This |
| // can fail in some cases (e.g. '1i -> int**'); check for this error case |
| // now.  Initialize the argument. |
| InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, |
| ValType, /*consume*/ false); |
| Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); |
| if (Arg.isInvalid()) |
| return ExprError(); |
| |
| // Okay, we have something that *can* be converted to the right type. Check |
| // to see if there is a potentially weird extension going on here. This can |
| // happen when you do an atomic operation on something like a char* and |
| // pass in 42. The 42 gets converted to char. This is even more strange |
| // for things like 45.123 -> char, etc. |
| // FIXME: Do this check. |
| TheCall->setArg(i+1, Arg.get()); |
| } |
| |
| ASTContext &Context = getASTContext(); |
|  |
| // Create a new DeclRefExpr to refer to the new decl. |
| DeclRefExpr *NewDRE = DeclRefExpr::Create( |
| Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, |
| /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, |
| DRE->getValueKind()); |
| |
| // Set the callee in the CallExpr. |
| // FIXME: This loses syntactic information. |
| QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); |
| ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, |
| CK_BuiltinFnToFnPtr); |
| TheCall->setCallee(PromotedCall.get()); |
| |
| // Change the result type of the call to match the original value type. This |
| // is arbitrary, but the codegen for these builtins is designed to handle it |
| // gracefully. |
| TheCall->setType(ResultType); |
| |
| return TheCallResult; |
| } |
| |
| /// SemaBuiltinNontemporalOverloaded - We have a call to |
| /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an |
| /// overloaded function based on the pointer type of its last argument. |
| /// |
| /// This function goes through and does final semantic checking for these |
| /// builtins. |
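| /// |
| /// For example (illustrative), given 'float *FP', the call |
| /// __builtin_nontemporal_store(1.0f, FP) stores through FP and has type |
| /// 'void', while __builtin_nontemporal_load(FP) has type 'float'. |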
| ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { |
| CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); |
| DeclRefExpr *DRE = |
| cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); |
| FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); |
| unsigned BuiltinID = FDecl->getBuiltinID(); |
| assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || |
| BuiltinID == Builtin::BI__builtin_nontemporal_load) && |
| "Unexpected nontemporal load/store builtin!"); |
| bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; |
| unsigned numArgs = isStore ? 2 : 1; |
| |
| // Ensure that we have the proper number of arguments. |
| if (checkArgCount(*this, TheCall, numArgs)) |
| return ExprError(); |
| |
| // Inspect the last argument of the nontemporal builtin. This should always |
| // be a pointer type, from which we imply the type of the memory access. |
| // Because it is a pointer type, we don't have to worry about any implicit |
| // casts here. |
| Expr *PointerArg = TheCall->getArg(numArgs - 1); |
| ExprResult PointerArgResult = |
| DefaultFunctionArrayLvalueConversion(PointerArg); |
| |
| if (PointerArgResult.isInvalid()) |
| return ExprError(); |
| PointerArg = PointerArgResult.get(); |
| TheCall->setArg(numArgs - 1, PointerArg); |
| |
| const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); |
| if (!pointerType) { |
| Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| QualType ValType = pointerType->getPointeeType(); |
| |
| // Strip any qualifiers off ValType. |
| ValType = ValType.getUnqualifiedType(); |
| if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && |
| !ValType->isBlockPointerType() && !ValType->isFloatingType() && |
| !ValType->isVectorType()) { |
| Diag(DRE->getLocStart(), |
| diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) |
| << PointerArg->getType() << PointerArg->getSourceRange(); |
| return ExprError(); |
| } |
| |
| if (!isStore) { |
| TheCall->setType(ValType); |
| return TheCallResult; |
| } |
| |
| ExprResult ValArg = TheCall->getArg(0); |
| InitializedEntity Entity = InitializedEntity::InitializeParameter( |
| Context, ValType, /*consume*/ false); |
| ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); |
| if (ValArg.isInvalid()) |
| return ExprError(); |
| |
| TheCall->setArg(0, ValArg.get()); |
| TheCall->setType(Context.VoidTy); |
| return TheCallResult; |
| } |
| |
| /// CheckObjCString - Checks that the argument to the builtin |
| /// CFString constructor is correct. |
| /// Note: It might also make sense to do the UTF-16 conversion here (would |
| /// simplify the backend). |
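| /// (The builtin in question is __builtin___CFStringMakeConstantString; its |
| /// argument must be an ordinary ASCII string literal.) |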
| bool Sema::CheckObjCString(Expr *Arg) { |
| Arg = Arg->IgnoreParenCasts(); |
| StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); |
| |
| if (!Literal || !Literal->isAscii()) { |
| Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant) |
| << Arg->getSourceRange(); |
| return true; |
| } |
| |
| if (Literal->containsNonAsciiOrNull()) { |
| StringRef String = Literal->getString(); |
| |