blob: f2aa931eeddfaac608a1fd67c707913a1ebe473d [file] [log] [blame]
//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
using namespace clang;
using namespace sema;
/// Translate a byte offset within a string literal into the source location of
/// the corresponding character, delegating to StringLiteral::getLocationOfByte
/// with this Sema's source manager, language options, and target info.
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
///
/// \param S           Semantic analyzer used to emit diagnostics.
/// \param call        The call being checked.
/// \param desiredArgCount The exact number of arguments required.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments. Note: argCount > desiredArgCount
  // here, so getArg(desiredArgCount) through getArg(argCount - 1) are valid.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  // Stream the computed range (previously this streamed getArg(1)'s range,
  // which highlighted the wrong argument and read out of bounds when a
  // zero-argument builtin was called with exactly one argument).
  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << range;
}
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // The annotated value must have integer type.
  Expr *ValueExpr = TheCall->getArg(0);
  QualType ValueTy = ValueExpr->getType();
  if (!ValueTy->isIntegerType()) {
    S.Diag(ValueExpr->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValueExpr->getSourceRange();
    return true;
  }

  // The annotation itself must be a narrow (non-wide) string literal.
  Expr *AnnotationExpr = TheCall->getArg(1)->IgnoreParenCasts();
  auto *StrLit = dyn_cast<StringLiteral>(AnnotationExpr);
  if (!StrLit || !StrLit->isAscii()) {
    S.Diag(AnnotationExpr->getBeginLoc(),
           diag::err_builtin_annotation_second_arg)
        << AnnotationExpr->getSourceRange();
    return true;
  }

  // The builtin yields its first argument's type.
  TheCall->setType(ValueTy);
  return false;
}
/// Check a call to the MSVC __annotation intrinsic: at least one argument,
/// and every argument must be a wide string literal.
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // At least one annotation string is required.
  unsigned NumArgs = TheCall->getNumArgs();
  if (NumArgs == 0) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << NumArgs << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // Each argument must be a wide string literal.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Str = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (Str && Str->isWide())
      continue;
    S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
        << Arg->getSourceRange();
    return true;
  }
  return false;
}
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // Reuse the ordinary unary '&' checking, which diagnoses non-glvalues and
  // may rewrite the operand.
  ExprResult Operand(TheCall->getArg(0));
  QualType PtrTy = S.CheckAddressOfOperand(Operand, TheCall->getBeginLoc());
  if (PtrTy.isNull())
    return true;

  TheCall->setArg(0, Operand.get());
  TheCall->setType(PtrTy);
  return false;
}
/// Check a call to __builtin_{add,sub,mul}_overflow: the first two arguments
/// must be integers and the third a pointer to a non-const integer. Each
/// argument is converted as if initializing a parameter of its own type.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // Convert argument I as a parameter of type Ty and store it back into the
  // call; returns true on conversion failure.
  auto ConvertArg = [&](unsigned I, ExprResult Arg, QualType Ty) -> bool {
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
    return false;
  };

  // The two operands must have integer type.
  for (unsigned I = 0; I != 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    if (ConvertArg(I, Arg, Ty))
      return true;
  }

  // The result argument must be a pointer to a non-const integer. IRGen
  // correctly handles volatile, restrict, and address spaces, and the other
  // qualifiers aren't possible.
  ExprResult ResultArg = TheCall->getArg(2);
  QualType ResultTy = ResultArg.get()->getType();
  const auto *PtrTy = ResultTy->getAs<PointerType>();
  if (!PtrTy || !PtrTy->getPointeeType()->isIntegerType() ||
      PtrTy->getPointeeType().isConstQualified()) {
    S.Diag(ResultArg.get()->getBeginLoc(),
           diag::err_overflow_builtin_must_be_ptr_int)
        << ResultTy << ResultArg.get()->getSourceRange();
    return true;
  }
  return ConvertArg(2, ResultArg, ResultTy);
}
/// Type-check __builtin_call_with_static_chain(call, chain). The first
/// argument must be a plain (non-block, non-builtin, non-pseudo-destructor)
/// call, the second a pointer. On success the builtin call is retyped to
/// forward the wrapped call's type, value kind, and object kind.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // The first operand must be exactly a CallExpr (getStmtClass comparison
  // deliberately excludes subclasses such as member or operator calls).
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  // Calls through block pointers are not supported.
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  // Calls to builtin functions are not supported.
  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  // Pseudo-destructor calls are not supported.
  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  // The chain argument must (after the usual conversions) be a pointer.
  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin callee a concrete function type taking (return type,
  // chain type), then make the builtin call mirror the wrapped call's result
  // characteristics and install the converted chain argument.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}
/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.

  // Dependent calls cannot be evaluated until instantiation.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  // SizeIndex: argument giving how many bytes the call will write.
  // ObjectIndex: argument from which the destination size is derived (the
  // explicit size for _chk variants, the destination pointer otherwise).
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    // For these, the size and object-size arguments are the last two.
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    // Bail out silently if the size isn't a compile-time constant.
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  // No diagnostic if the used size provably fits in the object.
  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikley that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}
/// Verify that a scope-restricted SEH builtin (e.g. _exception_code) is
/// called from a scope carrying \p NeededScopeFlags; emit \p DiagID otherwise.
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  // Walk outward to the nearest SEH __except scope, if there is one.
  Scope *Cur = SemaRef.getCurScope();
  while (Cur && !Cur->isSEHExceptScope())
    Cur = Cur->getParent();

  if (Cur && (Cur->getFlags() & NeededScopeFlags))
    return false;

  // Wrong (or no) scope: report the builtin by name.
  auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  SemaRef.Diag(TheCall->getExprLoc(), DiagID)
      << DRE->getDecl()->getIdentifier();
  return true;
}
/// Returns true if \p Arg's type is a block pointer type.
static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}
/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();

  bool IllegalParams = false;
  // Diagnose every block parameter that is not a 'local void *'.
  for (unsigned Idx = 0, NumParams = Params.size(); Idx != NumParams; ++Idx) {
    QualType ParamTy = Params[Idx];
    bool IsLocalVoidPtr =
        ParamTy->isPointerType() && ParamTy->getPointeeType()->isVoidType() &&
        ParamTy->getPointeeType().getQualifiers().getAddressSpace() ==
            LangAS::opencl_local;
    if (IsLocalVoidPtr)
      continue;

    // For a block literal we can point straight at the offending parameter;
    // for a variable reference we point at the reference itself.
    SourceLocation ErrorLoc;
    if (const auto *BE = dyn_cast<BlockExpr>(BlockArg))
      ErrorLoc = BE->getBlockDecl()->getParamDecl(Idx)->getBeginLoc();
    else if (const auto *DRE = dyn_cast<DeclRefExpr>(BlockArg))
      ErrorLoc = DRE->getBeginLoc();
    S.Diag(ErrorLoc,
           diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
    IllegalParams = true;
  }

  return IllegalParams;
}
/// Require the cl_khr_subgroups extension for a subgroup builtin call;
/// returns true (after diagnosing) when it is not enabled.
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (S.getOpenCLOptions().isEnabled("cl_khr_subgroups"))
    return false;
  S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
      << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
  return true;
}
/// Check a two-argument OpenCL builtin taking (ndrange_t, block), e.g.
/// get_kernel_sub_group_count_for_ndrange. Requires cl_khr_subgroups.
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2) || checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // The first argument must have ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // The second argument must be a block whose parameters are all local void*.
  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}
/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // The single argument must be a block with only local-void* parameters.
  Expr *BlockArg = TheCall->getArg(0);
  if (isBlockPointer(BlockArg))
    return checkOpenCLBlockArgs(S, BlockArg);

  S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
      << TheCall->getDirectCallee() << "block";
  return true;
}
/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
const QualType &IntType);
/// Check that arguments [Start, End] of \p TheCall are integers convertible
/// to size_t (the local-memory size arguments of enqueue_kernel); returns
/// true if any argument is invalid, diagnosing each one.
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I) {
    if (checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                  S.Context.getSizeType()))
      IllegalParams = true;
  }
  return IllegalParams;
}
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BlockPtrTy =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BlockPtrTy->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // Each block parameter must have a matching trailing size argument giving
  // the amount of local memory backing it.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // The local-memory sizes must all be integers convertible to size_t.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}
/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // Every overload takes at least (queue, flags, ndrange, block).
  // NOTE(review): this diagnostic is emitted without the operand values the
  // too-few-args message normally formats — verify it renders correctly.
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    // The no-varargs overload takes a block with no parameters at all.
    if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }

  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));

  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Forth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }

    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    // A null pointer constant is also accepted.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Exactly seven arguments: events form with a parameterless block.
    if (NumArgs == 7)
      return false;

    // Events form with a varargs block: validate the trailing sizes.
    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}
/// Returns the OpenCL access qualifier attribute of \p D, or null if it has
/// none.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}
/// Checks the first argument of an OpenCL pipe builtin: it must have pipe
/// type and its access qualifier (read_only/write_only, defaulting to
/// read_only when unqualified) must match the direction of the builtin.
/// Returns true on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): the cast<DeclRefExpr> assumes the pipe argument is a plain
  // variable reference and will assert otherwise — confirm callers guarantee
  // this.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read builtins accept an unqualified (implicitly read_only) pipe.
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write builtins require an explicit write_only qualifier.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}
/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *PipeArg = Call->getArg(0);
  const Expr *PacketArg = Call->getArg(Idx);
  const QualType EltTy = cast<PipeType>(PipeArg->getType())->getElementType();
  const PointerType *PacketPtrTy = PacketArg->getType()->getAs<PointerType>();

  // The packet argument must be a pointer whose pointee has the same
  // (canonical) type as the pipe's element type.
  bool TypesMatch =
      PacketPtrTy &&
      S.Context.hasSameType(
          EltTy, PacketPtrTy->getPointeeType()->getCanonicalTypeInternal());
  if (TypesMatch)
    return false;

  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
      << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
      << PacketArg->getType() << PacketArg->getSourceRange();
  return true;
}
// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    // NOTE(review): the isUnsignedIntegerType() branch appears subsumed by
    // isIntegerType() — confirm whether the second check is redundant.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;

  default:
    // Any other argument count is an error for these builtins.
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}
// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2) || checkOpenCLPipeArg(S, Call))
    return true;

  // The reservation size must be an integer.
  const Expr *SizeArg = Call->getArg(1);
  QualType SizeTy = SizeArg->getType();
  if (!SizeTy->isIntegerType() && !SizeTy->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy << SizeTy
        << SizeArg->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file, we use int
  // as the declared return type and override it here.
  Call->setType(S.Context.OCLReserveIDTy);
  return false;
}
// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2) || checkOpenCLPipeArg(S, Call))
    return true;

  // The second argument must carry the reservation ID (reserve_id_t).
  const Expr *ReserveIdArg = Call->getArg(1);
  if (ReserveIdArg->getType()->isReserveIDT())
    return false;

  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
      << Call->getDirectCallee() << S.Context.OCLReserveIDTy
      << ReserveIdArg->getType() << ReserveIdArg->getSourceRange();
  return true;
}
// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  // The sole argument must be a pipe.
  const Expr *PipeArg = Call->getArg(0);
  if (PipeArg->getType()->isPipeType())
    return false;

  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
      << Call->getDirectCallee() << PipeArg->getSourceRange();
  return true;
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  // These builtins take exactly one argument.
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // The argument must be a pointer, and not into the constant address space.
  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // A non-generic pointer is accepted but warned about.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Result type: same pointee, requalified with the target address space.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}
// Check a call to __builtin_launder: compute its result type from the
// argument, reject argument types that cannot be laundered, and coerce the
// argument to the computed parameter type.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is the argument type itself, except that array and
  // function types decay to the corresponding pointer type.
  QualType ParamTy = TheCall->getArg(0)->getType();
  if (const ArrayType *ATy = ParamTy->getAsArrayTypeUnsafe())
    ParamTy = S.Context.getPointerType(ATy->getElementType());
  else if (ParamTy->isFunctionType())
    ParamTy = S.Context.getPointerType(ParamTy);
  TheCall->setType(ParamTy);

  // Diagnose arguments that can never be laundered: non-pointers, function
  // pointers, and void pointers. The value selects the diagnostic wording.
  llvm::Optional<unsigned> DiagSelect;
  if (!ParamTy->isPointerType())
    DiagSelect = 0;
  else if (ParamTy->isFunctionPointerType())
    DiagSelect = 1;
  else if (ParamTy->isVoidPointerType())
    DiagSelect = 2;

  if (DiagSelect) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << *DiagSelect << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Coerce the argument to the computed parameter type.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  const llvm::Triple &TT = S.getASTContext().getTargetInfo().getTriple();
  // Nothing to diagnose when compiling for one of the supported architectures.
  if (llvm::is_contained(SupportedArchs, TT.getArch()))
    return false;

  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}
/// Perform builtin-specific semantic checks on a call to a builtin function.
///
/// \param FDecl The declaration of the builtin being called.
/// \param BuiltinID The Builtin::BI* ID of the builtin.
/// \param TheCall The call to check; individual checkers may mutate it
///        (coerce arguments, set the result type).
/// \return The (possibly rewritten) call on success, or an invalid
///         ExprResult on error. Note several paths below 'return true',
///         which converts to an invalid (error) ExprResult.
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  // ICEArguments is a bitmask: bit N set means argument N must be an ICE.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  // Builtin-specific checks, dispatched on the builtin ID.
  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // __va_start has a Microsoft-specific form on ARM/AArch64.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    // fpclassify takes 5 classification constants plus the operand: 6 args.
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    // The second argument (the "type") must be a constant in [0, 3].
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);

  // All the __sync_* builtins are overloaded on the argument type; they share
  // a single custom checker.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
  // Expand a 'case' per __c11/__opencl atomic builtin from Builtins.def;
  // plain BUILTIN entries expand to nothing here.
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    // When a prototype is available, require a variadic printf-like
    // signature: int (const char *, ...).
    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    // Sub-group variants additionally require the subgroups extension.
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}
// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
NeonTypeFlags Type(t);
int IsQuad = ForceQuad ? true : Type.isQuad();
switch (Type.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
return shift ? 7 : (8 << IsQuad) - 1;
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
return shift ? 15 : (4 << IsQuad) - 1;
case NeonTypeFlags::Int32:
return shift ? 31 : (2 << IsQuad) - 1;
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
return shift ? 63 : (1 << IsQuad) - 1;
case NeonTypeFlags::Poly128:
return shift ? 127 : (1 << IsQuad) - 1;
case NeonTypeFlags::Float16:
assert(!shift && "cannot shift float types!");
return (4 << IsQuad) - 1;
case NeonTypeFlags::Float32:
assert(!shift && "cannot shift float types!");
return (2 << IsQuad) - 1;
case NeonTypeFlags::Float64:
assert(!shift && "cannot shift float types!");
return (1 << IsQuad) - 1;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  const bool Unsigned = Flags.isUnsigned();
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Unsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Unsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Unsigned ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // Whether a 64-bit element maps to 'long' or 'long long' is decided by
    // the target's int64_t type (IsInt64Long).
    if (IsInt64Long)
      return Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    return Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    return IsInt64Long ? Context.UnsignedLongTy : Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // Poly128 has no corresponding element type here.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}
/// Perform semantic checking on a NEON builtin call: validate the overload
/// type-code immediate, coerce pointer arguments to the expected element
/// type, and range-check instruction immediates.
bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  uint64_t mask = 0;        // Bitmask of valid NEON type codes for this builtin.
  unsigned TV = 0;          // Type code actually passed (from the last argument).
  int PtrArgNum = -1;       // Index of the pointer argument to type-check, if any.
  bool HasConstPtr = false; // Whether that pointer's pointee must be const.
  switch (BuiltinID) {
  // The tablegen-generated overload checks populate 'mask', 'PtrArgNum' and
  // 'HasConstPtr' for the overloaded NEON/FP16 builtins.
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    // The type code must be one of the bits set in 'mask'.
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long =
        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
    // Build the expected pointer type from the element type selected by the
    // validated type code, and check the argument converts to it.
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  // The tablegen-generated immediate checks set argument index 'i' and the
  // inclusive bounds 'l' and 'u' for the builtins that need them.
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
/// Perform semantic checking for the ARM/AArch64 exclusive load/store
/// builtins (ldrex/ldaex/strex/stlex): validate the pointer argument, insert
/// the needed casts, coerce the stored value (for stores), and set the
/// call's result type.
///
/// \param MaxWidth Maximum supported access width in bits (64 on ARM,
///        128 on AArch64).
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // Loads take the pointer as argument 0, stores as argument 1.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed atomically.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // A load returns the loaded value; nothing more to check.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
/// Perform semantic checking for ARM-specific builtin calls: exclusives,
/// prefetch, special-register accessors, NEON intrinsics, and instruction
/// immediates.
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  case ARM::BI__builtin_arm_ldrex:
  case ARM::BI__builtin_arm_ldaex:
  case ARM::BI__builtin_arm_strex:
  case ARM::BI__builtin_arm_stlex:
    // ARM exclusives handle accesses of up to 64 bits.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  case ARM::BI__builtin_arm_prefetch:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  case ARM::BI__builtin_arm_rsr64:
  case ARM::BI__builtin_arm_wsr64:
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
  case ARM::BI__builtin_arm_rsr:
  case ARM::BI__builtin_arm_rsrp:
  case ARM::BI__builtin_arm_wsr:
  case ARM::BI__builtin_arm_wsrp:
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
  default:
    break;
  }

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default:
    return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  }
}
/// Perform semantic checking for AArch64-specific builtin calls: exclusives,
/// prefetch, special-register accessors, MTE intrinsics, NEON intrinsics,
/// and instruction immediates.
bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  switch (BuiltinID) {
  case AArch64::BI__builtin_arm_ldrex:
  case AArch64::BI__builtin_arm_ldaex:
  case AArch64::BI__builtin_arm_strex:
  case AArch64::BI__builtin_arm_stlex:
    // AArch64 exclusives handle accesses of up to 128 bits.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  case AArch64::BI__builtin_arm_prefetch:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  case AArch64::BI__builtin_arm_rsr64:
  case AArch64::BI__builtin_arm_wsr64:
  case AArch64::BI__builtin_arm_rsr:
  case AArch64::BI__builtin_arm_rsrp:
  case AArch64::BI__builtin_arm_wsr:
  case AArch64::BI__builtin_arm_wsrp:
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
  // Memory Tagging Extensions (MTE) Intrinsics
  case AArch64::BI__builtin_arm_irg:
  case AArch64::BI__builtin_arm_addg:
  case AArch64::BI__builtin_arm_gmi:
  case AArch64::BI__builtin_arm_ldg:
  case AArch64::BI__builtin_arm_stg:
  case AArch64::BI__builtin_arm_subp:
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  case AArch64::BI_ReadStatusReg:
  case AArch64::BI_WriteStatusReg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
  case AArch64::BI__getReg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  default:
    break;
  }

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb:
    l = 0;
    u = 15;
    break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
struct BuiltinAndString {
unsigned BuiltinID;
const char *Str;
};
static BuiltinAndString ValidCPU[] = {
{ Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
{ Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
{ Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
{ Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
};
static BuiltinAndString ValidHVX[] = {
{ Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
{ Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66"