author     Dimitry Andric <dim@FreeBSD.org>  2017-12-24 01:00:23 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2017-12-24 01:00:23 +0000
commit     6252156da5066bd47f63f8bd40404d1f89909d32 (patch)
tree       743d1b59945b071890dc19b6bf5e9d3ff0d1399b /lib
parent     461a67fa15370a9ec88f8f8a240bf7c123bb2029 (diff)
download   src-6252156da5066bd47f63f8bd40404d1f89909d32.tar.gz
           src-6252156da5066bd47f63f8bd40404d1f89909d32.zip
Vendor import of clang trunk r321414 (tag: vendor/clang/clang-trunk-r321414)
Notes:
    svn path=/vendor/clang/dist/; revision=327124
    svn path=/vendor/clang/clang-trunk-r321414/; revision=327125; tag=vendor/clang/clang-trunk-r321414
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTDumper.cpp | 48
-rw-r--r--  lib/AST/ASTImporter.cpp | 126
-rw-r--r--  lib/AST/Decl.cpp | 25
-rw-r--r--  lib/AST/Expr.cpp | 3
-rw-r--r--  lib/AST/ExprCXX.cpp | 4
-rw-r--r--  lib/AST/ExprConstant.cpp | 16
-rw-r--r--  lib/AST/ItaniumMangle.cpp | 2
-rw-r--r--  lib/AST/ODRHash.cpp | 34
-rw-r--r--  lib/AST/TypeLoc.cpp | 2
-rw-r--r--  lib/AST/TypePrinter.cpp | 12
-rw-r--r--  lib/Basic/Diagnostic.cpp | 2
-rw-r--r--  lib/Basic/DiagnosticIDs.cpp | 2
-rw-r--r--  lib/Basic/SourceManager.cpp | 7
-rw-r--r--  lib/Basic/Targets/AArch64.cpp | 3
-rw-r--r--  lib/Basic/Targets/X86.cpp | 12
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 6
-rw-r--r--  lib/CodeGen/CGAtomic.cpp | 26
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 261
-rw-r--r--  lib/CodeGen/CGCall.cpp | 23
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 1
-rw-r--r--  lib/CodeGen/CGException.cpp | 2
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 17
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 2
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 7
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 37
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 25
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 7
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp | 21
-rw-r--r--  lib/CodeGen/CodeGenTypeCache.h | 2
-rw-r--r--  lib/Driver/SanitizerArgs.cpp | 3
-rw-r--r--  lib/Driver/ToolChains/Clang.cpp | 1
-rw-r--r--  lib/Driver/ToolChains/CommonArgs.cpp | 6
-rw-r--r--  lib/Driver/ToolChains/Darwin.cpp | 154
-rw-r--r--  lib/Driver/ToolChains/Fuchsia.cpp | 1
-rw-r--r--  lib/Driver/ToolChains/Myriad.cpp | 9
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 38
-rw-r--r--  lib/Frontend/PrecompiledPreamble.cpp | 29
-rw-r--r--  lib/Headers/__clang_cuda_intrinsics.h | 77
-rw-r--r--  lib/Headers/cpuid.h | 19
-rw-r--r--  lib/Headers/xmmintrin.h | 11
-rw-r--r--  lib/Index/IndexSymbol.cpp | 65
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 13
-rw-r--r--  lib/Lex/ModuleMap.cpp | 98
-rw-r--r--  lib/Lex/Preprocessor.cpp | 7
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 14
-rw-r--r--  lib/Rewrite/HTMLRewrite.cpp | 1
-rw-r--r--  lib/Sema/CodeCompleteConsumer.cpp | 2
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 2
-rw-r--r--  lib/Sema/SemaChecking.cpp | 6
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 6
-rw-r--r--  lib/Sema/SemaDecl.cpp | 8
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 6
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 4
-rw-r--r--  lib/Sema/SemaExpr.cpp | 6
-rw-r--r--  lib/Sema/SemaExprMember.cpp | 2
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 1
-rw-r--r--  lib/Sema/SemaInit.cpp | 4
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 51
-rw-r--r--  lib/Sema/SemaOverload.cpp | 10
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 60
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 3
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 37
-rw-r--r--  lib/Sema/SemaType.cpp | 38
-rw-r--r--  lib/Serialization/ASTReader.cpp | 244
-rw-r--r--  lib/Serialization/ASTReaderDecl.cpp | 3
-rw-r--r--  lib/Serialization/ASTWriterDecl.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 16
-rw-r--r--  lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporter.cpp | 12
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 13
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 18
-rw-r--r--  lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 95
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp | 7
73 files changed, 1391 insertions, 551 deletions
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 157b29fd84b6..92be6d95e898 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -99,6 +99,9 @@ namespace {
const CommandTraits *Traits;
const SourceManager *SM;
+ /// The policy to use for printing; can be defaulted.
+ PrintingPolicy PrintPolicy;
+
/// Pending[i] is an action to dump an entity at level i.
llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
@@ -207,12 +210,17 @@ namespace {
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
- : OS(OS), Traits(Traits), SM(SM),
- ShowColors(SM && SM->getDiagnostics().getShowColors()) { }
+ : ASTDumper(OS, Traits, SM,
+ SM && SM->getDiagnostics().getShowColors()) {}
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors)
- : OS(OS), Traits(Traits), SM(SM), ShowColors(ShowColors) {}
+ : ASTDumper(OS, Traits, SM, ShowColors, LangOptions()) {}
+ ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM, bool ShowColors,
+ const PrintingPolicy &PrintPolicy)
+ : OS(OS), Traits(Traits), SM(SM), PrintPolicy(PrintPolicy),
+ ShowColors(ShowColors) {}
void setDeserialize(bool D) { Deserialize = D; }
@@ -646,13 +654,13 @@ void ASTDumper::dumpBareType(QualType T, bool Desugar) {
ColorScope Color(*this, TypeColor);
SplitQualType T_split = T.split();
- OS << "'" << QualType::getAsString(T_split) << "'";
+ OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
if (Desugar && !T.isNull()) {
// If the type is sugared, also dump a (shallow) desugared type.
SplitQualType D_split = T.getSplitDesugaredType();
if (T_split != D_split)
- OS << ":'" << QualType::getAsString(D_split) << "'";
+ OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
}
}
@@ -1187,12 +1195,12 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->size_overridden_methods() != 0) {
- auto dumpOverride =
- [=](const CXXMethodDecl *D) {
- SplitQualType T_split = D->getType().split();
- OS << D << " " << D->getParent()->getName() << "::"
- << D->getNameAsString() << " '" << QualType::getAsString(T_split) << "'";
- };
+ auto dumpOverride = [=](const CXXMethodDecl *D) {
+ SplitQualType T_split = D->getType().split();
+ OS << D << " " << D->getParent()->getName()
+ << "::" << D->getNameAsString() << " '"
+ << QualType::getAsString(T_split, PrintPolicy) << "'";
+ };
dumpChild([=] {
auto Overrides = MD->overridden_methods();
@@ -1537,7 +1545,7 @@ void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
case TSK_ExplicitInstantiationDefinition:
if (!DumpExplicitInst)
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
@@ -2682,15 +2690,19 @@ LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize) const {
- ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
- &getASTContext().getSourceManager());
+ const ASTContext &Ctx = getASTContext();
+ const SourceManager &SM = Ctx.getSourceManager();
+ ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &SM,
+ SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
P.setDeserialize(Deserialize);
P.dumpDecl(this);
}
LLVM_DUMP_METHOD void Decl::dumpColor() const {
- ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(),
- &getASTContext().getSourceManager(), /*ShowColors*/true);
+ const ASTContext &Ctx = getASTContext();
+ ASTDumper P(llvm::errs(), &Ctx.getCommentCommandTraits(),
+ &Ctx.getSourceManager(), /*ShowColors*/ true,
+ Ctx.getPrintingPolicy());
P.dumpDecl(this);
}
@@ -2705,7 +2717,9 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
while (!DC->isTranslationUnit())
DC = DC->getParent();
ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager());
+ const SourceManager &SM = Ctx.getSourceManager();
+ ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager(),
+ SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
P.setDeserialize(Deserialize);
P.dumpLookups(this, DumpDecls);
}
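
The net effect of threading a PrintingPolicy through ASTDumper is that dumped type names now follow the ASTContext's language options rather than a default-constructed LangOptions. As a minimal illustration (not part of the patch; the helper name dumpTypeSpelling is hypothetical) using the QualType::getAsString overloads touched above:

#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "llvm/Support/raw_ostream.h"

// Print a type twice: with a default policy, then with the context's policy.
// In C++ code the default policy spells 'bool' as '_Bool'; the context's
// policy prints it as written in the source language.
void dumpTypeSpelling(const clang::ASTContext &Ctx, clang::QualType T) {
  llvm::errs() << T.getAsString() << "\n";                        // default LangOptions
  llvm::errs() << T.getAsString(Ctx.getPrintingPolicy()) << "\n"; // context policy
}
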
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 84b0d7ecff93..0e627f9737ce 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -134,17 +134,12 @@ namespace clang {
bool ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs);
- template <typename InContainerTy>
- bool ImportTemplateArgumentListInfo(const InContainerTy &Container,
- TemplateArgumentListInfo &ToTAInfo);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
bool Complain = true);
bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
bool Complain = true);
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC);
- bool IsStructuralMatch(FunctionTemplateDecl *From,
- FunctionTemplateDecl *To);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
Decl *VisitDecl(Decl *D);
@@ -200,7 +195,6 @@ namespace clang {
ClassTemplateSpecializationDecl *D);
Decl *VisitVarTemplateDecl(VarTemplateDecl *D);
Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
- Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
// Importing statements
DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
@@ -286,7 +280,6 @@ namespace clang {
Expr *VisitCXXDeleteExpr(CXXDeleteExpr *E);
Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
Expr *VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
- Expr *VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
Expr *VisitExprWithCleanups(ExprWithCleanups *EWC);
Expr *VisitCXXThisExpr(CXXThisExpr *E);
Expr *VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
@@ -1254,18 +1247,6 @@ bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
return false;
}
-template <typename InContainerTy>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo(
- const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo) {
- for (const auto &FromLoc : Container) {
- if (auto ToLoc = ImportTemplateArgumentLoc(FromLoc))
- ToTAInfo.addArgument(*ToLoc);
- else
- return true;
- }
- return false;
-}
-
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
RecordDecl *ToRecord, bool Complain) {
// Eliminate a potential failure point where we attempt to re-import
@@ -1299,14 +1280,6 @@ bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
}
-bool ASTNodeImporter::IsStructuralMatch(FunctionTemplateDecl *From,
- FunctionTemplateDecl *To) {
- StructuralEquivalenceContext Ctx(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), false, false);
- return Ctx.IsStructurallyEquivalent(From, To);
-}
-
bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
EnumConstantDecl *ToEC)
{
@@ -4224,64 +4197,6 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
return D2;
}
-Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
- DeclContext *DC, *LexicalDC;
- DeclarationName Name;
- SourceLocation Loc;
- NamedDecl *ToD;
-
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
-
- if (ToD)
- return ToD;
-
- // Try to find a function in our own ("to") context with the same name, same
- // type, and in the same context as the function we're importing.
- if (!LexicalDC->isFunctionOrMethod()) {
- unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
- continue;
-
- if (FunctionTemplateDecl *FoundFunction =
- dyn_cast<FunctionTemplateDecl>(FoundDecls[I])) {
- if (FoundFunction->hasExternalFormalLinkage() &&
- D->hasExternalFormalLinkage()) {
- if (IsStructuralMatch(D, FoundFunction)) {
- Importer.Imported(D, FoundFunction);
- // FIXME: Actually try to merge the body and other attributes.
- return FoundFunction;
- }
- }
- }
- }
- }
-
- TemplateParameterList *Params =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!Params)
- return nullptr;
-
- FunctionDecl *TemplatedFD =
- cast_or_null<FunctionDecl>(Importer.Import(D->getTemplatedDecl()));
- if (!TemplatedFD)
- return nullptr;
-
- FunctionTemplateDecl *ToFunc = FunctionTemplateDecl::Create(
- Importer.getToContext(), DC, Loc, Name, Params, TemplatedFD);
-
- TemplatedFD->setDescribedFunctionTemplate(ToFunc);
- ToFunc->setAccess(D->getAccess());
- ToFunc->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToFunc);
-
- LexicalDC->addDeclInternal(ToFunc);
- return ToFunc;
-}
-
//----------------------------------------------------------------------------
// Import Statements
//----------------------------------------------------------------------------
@@ -5844,47 +5759,6 @@ Expr *ASTNodeImporter::VisitCXXPseudoDestructorExpr(
Importer.Import(E->getTildeLoc()), Storage);
}
-Expr *ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
- CXXDependentScopeMemberExpr *E) {
- Expr *Base = nullptr;
- if (!E->isImplicitAccess()) {
- Base = Importer.Import(E->getBase());
- if (!Base)
- return nullptr;
- }
-
- QualType BaseType = Importer.Import(E->getBaseType());
- if (BaseType.isNull())
- return nullptr;
-
- TemplateArgumentListInfo ToTAInfo(Importer.Import(E->getLAngleLoc()),
- Importer.Import(E->getRAngleLoc()));
- TemplateArgumentListInfo *ResInfo = nullptr;
- if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
- ResInfo = &ToTAInfo;
- }
-
- DeclarationName Name = Importer.Import(E->getMember());
- if (!E->getMember().isEmpty() && Name.isEmpty())
- return nullptr;
-
- DeclarationNameInfo MemberNameInfo(Name, Importer.Import(E->getMemberLoc()));
- // Import additional name location/type info.
- ImportDeclarationNameLoc(E->getMemberNameInfo(), MemberNameInfo);
- auto ToFQ = Importer.Import(E->getFirstQualifierFoundInScope());
- if (!ToFQ && E->getFirstQualifierFoundInScope())
- return nullptr;
-
- return CXXDependentScopeMemberExpr::Create(
- Importer.getToContext(), Base, BaseType, E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- cast_or_null<NamedDecl>(ToFQ), MemberNameInfo, ResInfo);
-}
-
Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 2f51ec31a7bd..629037b1755c 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -26,6 +26,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ODRHash.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
@@ -1548,7 +1549,10 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
// enumerator is declared in the scope that immediately contains
// the enum-specifier. Each scoped enumerator is declared in the
// scope of the enumeration.
- if (ED->isScoped() || ED->getIdentifier())
+ // For the case of an unscoped enumerator, do not include in the qualified
+ // name any information about its enclosing enum scope, as its visibility
+ // is global.
+ if (ED->isScoped())
OS << *ED;
else
continue;
@@ -3601,6 +3605,25 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return 0;
}
+unsigned FunctionDecl::getODRHash() {
+ if (HasODRHash)
+ return ODRHash;
+
+ if (FunctionDecl *Definition = getDefinition()) {
+ if (Definition != this) {
+ HasODRHash = true;
+ ODRHash = Definition->getODRHash();
+ return ODRHash;
+ }
+ }
+
+ class ODRHash Hash;
+ Hash.AddFunctionDecl(this);
+ HasODRHash = true;
+ ODRHash = Hash.CalculateHash();
+ return ODRHash;
+}
+
//===----------------------------------------------------------------------===//
// FieldDecl Implementation
//===----------------------------------------------------------------------===//
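
For the printQualifiedName change above, a small illustration (not part of the patch) of how enumerator names are now qualified:

namespace N {
  enum Unscoped { A };      // unscoped: 'A' is now printed as 'N::A',
                            //           no longer as 'N::Unscoped::A'
  enum class Scoped { B };  // scoped:   'B' is still printed as 'N::Scoped::B'
}
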
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 55061aa462e5..7ddab9356b54 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -3116,7 +3116,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
if (DCE->getTypeAsWritten()->isReferenceType() &&
DCE->getCastKind() == CK_Dynamic)
return true;
- } // Fall through.
+ }
+ LLVM_FALLTHROUGH;
case ImplicitCastExprClass:
case CStyleCastExprClass:
case CXXStaticCastExprClass:
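
Many hunks in this import replace "// Fall through." comments with LLVM_FALLTHROUGH. A minimal sketch of the idiom, assuming the usual definition in llvm/Support/Compiler.h (it expands to a [[fallthrough]]-style attribute where supported and to nothing otherwise), so -Wimplicit-fallthrough can distinguish intentional fall-through from a missing break:

#include "llvm/Support/Compiler.h"

// Hypothetical example, not taken from the patch.
int classify(int Kind) {
  int Score = 0;
  switch (Kind) {
  case 0:
    Score += 1;
    LLVM_FALLTHROUGH; // intentional: case 0 also gets case 1's handling
  case 1:
    Score += 2;
    break;
  default:
    break;
  }
  return Score;
}
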
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 262a1e3ff9d5..a0d611381123 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -857,7 +857,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
switch (Kind) {
case LCK_StarThis:
Bits |= Capture_ByCopy;
- // Fall through
+ LLVM_FALLTHROUGH;
case LCK_This:
assert(!Var && "'this' capture cannot have a variable!");
Bits |= Capture_This;
@@ -865,7 +865,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
case LCK_ByCopy:
Bits |= Capture_ByCopy;
- // Fall through
+ LLVM_FALLTHROUGH;
case LCK_ByRef:
assert(Var && "capture must have a variable!");
break;
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 9c9eeb79b40a..8d9b3c3bebc0 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -5913,7 +5913,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strchr:
case Builtin::BI__builtin_wcschr:
case Builtin::BI__builtin_memchr:
@@ -5952,7 +5952,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Desired))
return ZeroInitialization(E);
StopAtNull = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BImemchr:
case Builtin::BI__builtin_memchr:
case Builtin::BI__builtin_char_memchr:
@@ -5965,7 +5965,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIwcschr:
case Builtin::BI__builtin_wcschr:
StopAtNull = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BIwmemchr:
case Builtin::BI__builtin_wmemchr:
// wcschr and wmemchr are given a wchar_t to look for. Just use it.
@@ -7209,6 +7209,7 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case BuiltinType::Dependent:
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
};
+ break;
case Type::Enum:
return LangOpts.CPlusPlus ? enumeral_type_class : integer_type_class;
@@ -7419,7 +7420,10 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
// If we don't know the array bound, conservatively assume we're looking at
// the final array element.
++I;
- BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+ if (BaseType->isIncompleteArrayType())
+ BaseType = Ctx.getAsArrayType(BaseType)->getElementType();
+ else
+ BaseType = BaseType->castAs<PointerType>()->getPointeeType();
}
for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
@@ -7821,7 +7825,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strlen:
case Builtin::BI__builtin_wcslen: {
// As an extension, we support __builtin_strlen() as a constant expression,
@@ -7881,7 +7885,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strcmp:
case Builtin::BI__builtin_wcscmp:
case Builtin::BI__builtin_strncmp:
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index f95dc8458e8a..3c7e26d41370 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -1468,7 +1468,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (!MD->isStatic())
Arity++;
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXLiteralOperatorName:
mangleOperatorName(Name, Arity);
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index 17c95f2a0af7..088d8bedd453 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -466,8 +466,42 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
}
}
+void ODRHash::AddFunctionDecl(const FunctionDecl *Function) {
+ assert(Function && "Expecting non-null pointer.");
+
+ // Skip hashing these kinds of function.
+ if (Function->isImplicit()) return;
+ if (Function->isDefaulted()) return;
+ if (Function->isDeleted()) return;
+ if (!Function->hasBody()) return;
+ if (!Function->getBody()) return;
+
+ // TODO: Fix hashing for class methods.
+ if (isa<CXXMethodDecl>(Function)) return;
+
+ // Skip functions that are specializations or in specialization context.
+ const DeclContext *DC = Function;
+ while (DC) {
+ if (isa<ClassTemplateSpecializationDecl>(DC)) return;
+ if (auto *F = dyn_cast<FunctionDecl>(DC))
+ if (F->isFunctionTemplateSpecialization()) return;
+ DC = DC->getParent();
+ }
+
+ AddDecl(Function);
+
+ AddQualType(Function->getReturnType());
+
+ ID.AddInteger(Function->param_size());
+ for (auto Param : Function->parameters())
+ AddSubDecl(Param);
+
+ AddStmt(Function->getBody());
+}
+
void ODRHash::AddDecl(const Decl *D) {
assert(D && "Expecting non-null pointer.");
+ D = D->getCanonicalDecl();
auto Result = DeclMap.insert(std::make_pair(D, DeclMap.size()));
ID.AddInteger(Result.first->second);
// On first encounter of a Decl pointer, process it. Every time afterwards,
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index b05c5fc68096..0ac50b31acec 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -201,7 +201,7 @@ SourceLocation TypeLoc::getBeginLoc() const {
LeftMost = Cur;
break;
}
- /* Fall through */
+ LLVM_FALLTHROUGH;
case FunctionNoProto:
case ConstantArray:
case DependentSizedArray:
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 35e0b75f3c22..c28ada7dcb8b 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -213,7 +213,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::VariableArray:
case Type::DependentSizedArray:
NeedARCStrongQualifier = true;
- // Fall through
+ LLVM_FALLTHROUGH;
case Type::Adjusted:
case Type::Decayed:
@@ -1712,16 +1712,20 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
OS << ' ';
}
+std::string QualType::getAsString() const {
+ return getAsString(split(), LangOptions());
+}
+
std::string QualType::getAsString(const PrintingPolicy &Policy) const {
std::string S;
getAsStringInternal(S, Policy);
return S;
}
-std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+std::string QualType::getAsString(const Type *ty, Qualifiers qs,
+ const PrintingPolicy &Policy) {
std::string buffer;
- LangOptions options;
- getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+ getAsStringInternal(ty, qs, buffer, Policy);
return buffer;
}
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 640b42c1ca2e..26baa838f8c6 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -363,7 +363,7 @@ void DiagnosticsEngine::setSeverityForAll(diag::Flavor Flavor,
diag::Severity Map,
SourceLocation Loc) {
// Get all the diagnostics.
- SmallVector<diag::kind, 64> AllDiags;
+ std::vector<diag::kind> AllDiags;
DiagnosticIDs::getAllDiagnostics(Flavor, AllDiags);
// Set the mapping.
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 5c53f35aa68f..c4c425d9eb1d 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -583,7 +583,7 @@ DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
}
void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
- SmallVectorImpl<diag::kind> &Diags) {
+ std::vector<diag::kind> &Diags) {
for (unsigned i = 0; i != StaticDiagInfoSize; ++i)
if (StaticDiagInfo[i].getFlavor() == Flavor)
Diags.push_back(StaticDiagInfo[i].DiagID);
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index e664879639f3..0a51985614c8 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -125,11 +125,12 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
// possible.
if (!BufferOrError) {
StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
- Buffer.setPointer(MemoryBuffer::getNewUninitMemBuffer(
- ContentsEntry->getSize(), "<invalid>").release());
- char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
+ auto BackupBuffer = llvm::WritableMemoryBuffer::getNewUninitMemBuffer(
+ ContentsEntry->getSize(), "<invalid>");
+ char *Ptr = BackupBuffer->getBufferStart();
for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
Ptr[i] = FillStr[i % FillStr.size()];
+ Buffer.setPointer(BackupBuffer.release());
if (Diag.isDiagnosticInFlight())
Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
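
The SourceManager hunk above switches the fake-buffer path to llvm::WritableMemoryBuffer so the bytes can be filled in before the buffer is handed out as a read-only MemoryBuffer. A minimal sketch of that pattern (the name makeFillerBuffer is illustrative, not from the patch):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstddef>
#include <memory>

std::unique_ptr<llvm::MemoryBuffer> makeFillerBuffer(size_t Size) {
  llvm::StringRef Fill("<<<MISSING SOURCE FILE>>>\n");
  auto Buf =
      llvm::WritableMemoryBuffer::getNewUninitMemBuffer(Size, "<invalid>");
  char *Ptr = Buf->getBufferStart(); // writable, unlike plain MemoryBuffer
  for (size_t I = 0; I != Size; ++I)
    Ptr[I] = Fill[I % Fill.size()];
  return std::move(Buf); // unique_ptr<WritableMemoryBuffer> converts to the base
}
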
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 62990dc23821..6080cefac744 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -181,6 +181,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Unaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
+ if ((FPU & NeonMode) && HasFullFP16)
+ Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
+
switch (ArchKind) {
default:
break;
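
With __ARM_FEATURE_FP16_VECTOR_ARITHMETIC now defined whenever NEON and full FP16 support are both present, code can guard half-precision vector intrinsics on that macro. A small sketch (assuming the ACLE fp16 intrinsics from <arm_neon.h>; illustrative only, not part of the patch):

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_neon.h>
// Half-precision vector add, compiled only when the target provides it.
float16x4_t add_halves(float16x4_t A, float16x4_t B) {
  return vadd_f16(A, B);
}
#endif
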
diff --git a/lib/Basic/Targets/X86.cpp b/lib/Basic/Targets/X86.cpp
index 7fd9fd047818..bdf5cdb9407b 100644
--- a/lib/Basic/Targets/X86.cpp
+++ b/lib/Basic/Targets/X86.cpp
@@ -159,6 +159,7 @@ bool X86TargetInfo::initFeatureMap(
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
setFeatureEnabledImpl(Features, "adx", true);
+ setFeatureEnabledImpl(Features, "prfchw", true);
LLVM_FALLTHROUGH;
case CK_Haswell:
setFeatureEnabledImpl(Features, "avx2", true);
@@ -224,6 +225,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "sse4.2", true);
+ setFeatureEnabledImpl(Features, "prfchw", true);
LLVM_FALLTHROUGH;
case CK_Bonnell:
setFeatureEnabledImpl(Features, "movbe", true);
@@ -241,6 +243,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512cd", true);
setFeatureEnabledImpl(Features, "avx512er", true);
setFeatureEnabledImpl(Features, "avx512pf", true);
+ setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "prefetchwt1", true);
setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "rdseed", true);
@@ -1131,6 +1134,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
return llvm::StringSwitch<bool>(Name)
.Case("3dnow", true)
.Case("3dnowa", true)
+ .Case("adx", true)
.Case("aes", true)
.Case("avx", true)
.Case("avx2", true)
@@ -1160,6 +1164,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("mmx", true)
.Case("movbe", true)
.Case("mpx", true)
+ .Case("mwaitx", true)
.Case("pclmul", true)
.Case("pku", true)
.Case("popcnt", true)
@@ -1170,6 +1175,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("rtm", true)
.Case("sgx", true)
.Case("sha", true)
+ .Case("shstk", true)
.Case("sse", true)
.Case("sse2", true)
.Case("sse3", true)
@@ -1190,6 +1196,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
bool X86TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
+ .Case("adx", HasADX)
.Case("aes", HasAES)
.Case("avx", SSELevel >= AVX)
.Case("avx2", SSELevel >= AVX2)
@@ -1214,6 +1221,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("fma4", XOPLevel >= FMA4)
.Case("fsgsbase", HasFSGSBASE)
.Case("fxsr", HasFXSR)
+ .Case("ibt", HasIBT)
.Case("lwp", HasLWP)
.Case("lzcnt", HasLZCNT)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
@@ -1221,8 +1229,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("mmx", MMX3DNowLevel >= MMX)
.Case("movbe", HasMOVBE)
.Case("mpx", HasMPX)
- .Case("shstk", HasSHSTK)
- .Case("ibt", HasIBT)
+ .Case("mwaitx", HasMWAITX)
.Case("pclmul", HasPCLMUL)
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
@@ -1233,6 +1240,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("rtm", HasRTM)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
+ .Case("shstk", HasSHSTK)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
.Case("sse3", SSELevel >= SSE3)
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 2c033e0f7c02..e2349da5f0a4 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -239,7 +239,11 @@ static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
- PM.add(createHWAddressSanitizerPass());
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper &>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
+ PM.add(createHWAddressSanitizerPass(Recover));
}
static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index d90c3a53a635..6862fd811186 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -573,7 +573,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_add_fetch:
PostOp = llvm::Instruction::Add;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
@@ -582,7 +582,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
@@ -601,7 +601,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
@@ -610,7 +610,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
@@ -619,7 +619,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
@@ -628,7 +628,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
@@ -828,7 +828,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
@@ -1035,7 +1035,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_add_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_add_fetch:
PostOp = llvm::Instruction::Add;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
@@ -1047,7 +1047,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
@@ -1059,7 +1059,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
@@ -1071,7 +1071,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
@@ -1083,7 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
@@ -1109,7 +1109,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_fetch_nand_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 3ecd1c6697d7..609987c4fa4c 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1432,14 +1432,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__debugbreak:
return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
case Builtin::BI__builtin_unreachable: {
- if (SanOpts.has(SanitizerKind::Unreachable)) {
- SanitizerScope SanScope(this);
- EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
- SanitizerKind::Unreachable),
- SanitizerHandler::BuiltinUnreachable,
- EmitCheckSourceLocation(E->getExprLoc()), None);
- } else
- Builder.CreateUnreachable();
+ EmitUnreachable(E->getExprLoc());
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("unreachable.cont"));
@@ -3341,10 +3334,10 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- return CGF->EmitARMBuiltinExpr(BuiltinID, E);
+ return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- return CGF->EmitAArch64BuiltinExpr(BuiltinID, E);
+ return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return CGF->EmitX86BuiltinExpr(BuiltinID, E);
@@ -3385,6 +3378,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
+ llvm::Triple::ArchType Arch,
bool V1Ty=false) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
@@ -3393,8 +3387,14 @@ static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
- case NeonTypeFlags::Float16:
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ case NeonTypeFlags::Float16:
+ // FIXME: Only AArch64 backend can so far properly handle half types.
+ // Remove else part once ARM backend support for half is complete.
+ if (Arch == llvm::Triple::aarch64)
+ return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
+ else
+ return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
@@ -3417,6 +3417,8 @@ static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
NeonTypeFlags IntTypeFlags) {
int IsQuad = IntTypeFlags.isQuad();
switch (IntTypeFlags.getEltType()) {
+ case NeonTypeFlags::Int16:
+ return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
case NeonTypeFlags::Int64:
@@ -3564,55 +3566,80 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
+ NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvt_s16_v),
NEONMAP0(vcvt_s32_v),
NEONMAP0(vcvt_s64_v),
+ NEONMAP0(vcvt_u16_v),
NEONMAP0(vcvt_u32_v),
NEONMAP0(vcvt_u64_v),
+ NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
NEONMAP0(vcvtq_f32_v),
+ NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_s16_v),
NEONMAP0(vcvtq_s32_v),
NEONMAP0(vcvtq_s64_v),
+ NEONMAP0(vcvtq_u16_v),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
NEONMAP0(vext_v),
@@ -3775,19 +3802,27 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
+ NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
+ NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
+ NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
@@ -4197,7 +4232,8 @@ static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
- SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1) {
+ SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
+ llvm::Triple::ArchType Arch) {
// Get the last argument, which specifies the vector type.
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
@@ -4209,7 +4245,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
- llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -4256,9 +4292,20 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcageq_v:
case NEON::BI__builtin_neon_vcagt_v:
case NEON::BI__builtin_neon_vcagtq_v: {
- llvm::Type *VecFlt = llvm::VectorType::get(
- VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
- VTy->getNumElements());
+ llvm::Type *Ty;
+ switch (VTy->getScalarSizeInBits()) {
+ default: llvm_unreachable("unexpected type");
+ case 32:
+ Ty = FloatTy;
+ break;
+ case 64:
+ Ty = DoubleTy;
+ break;
+ case 16:
+ Ty = HalfTy;
+ break;
+ }
+ llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
@@ -4272,11 +4319,19 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_f32_v:
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), Arch);
+ return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case NEON::BI__builtin_neon_vcvt_f16_v:
+ case NEON::BI__builtin_neon_vcvtq_f16_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), Arch);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case NEON::BI__builtin_neon_vcvt_n_f16_v:
case NEON::BI__builtin_neon_vcvt_n_f32_v:
case NEON::BI__builtin_neon_vcvt_n_f64_v:
+ case NEON::BI__builtin_neon_vcvtq_n_f16_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
@@ -4284,11 +4339,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
+ case NEON::BI__builtin_neon_vcvt_n_s16_v:
case NEON::BI__builtin_neon_vcvt_n_s32_v:
+ case NEON::BI__builtin_neon_vcvt_n_u16_v:
case NEON::BI__builtin_neon_vcvt_n_u32_v:
case NEON::BI__builtin_neon_vcvt_n_s64_v:
case NEON::BI__builtin_neon_vcvt_n_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_n_s16_v:
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
+ case NEON::BI__builtin_neon_vcvtq_n_u16_v:
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
@@ -4300,44 +4359,63 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
+ case NEON::BI__builtin_neon_vcvt_s16_v:
+ case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
- case NEON::BI__builtin_neon_vcvtq_u64_v: {
+ case NEON::BI__builtin_neon_vcvtq_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_s16_v:
+ case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
+ case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
+ case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
+ case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v:
+ case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
+ case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
+ case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
+ case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v:
+ case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
+ case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
+ case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
+ case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v:
+ case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
+ case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
+ case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
+ case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
@@ -4816,7 +4894,8 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
+ const CallExpr *E,
+ llvm::Triple::ArchType Arch) {
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
@@ -5355,7 +5434,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool usgn = Type.isUnsigned();
bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -5368,7 +5447,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
unsigned Int;
switch (BuiltinID) {
@@ -5393,7 +5472,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
}
- // fall through
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
@@ -5518,7 +5597,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Tys), Ops);
}
- // fall through
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
@@ -5555,7 +5634,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
- SmallVectorImpl<Value *> &Ops) {
+ SmallVectorImpl<Value *> &Ops,
+ llvm::Triple::ArchType Arch) {
unsigned int Int = 0;
const char *s = nullptr;
@@ -5600,7 +5680,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
- llvm::VectorType *Ty = GetNeonType(&CGF, Type);
+ llvm::VectorType *Ty = GetNeonType(&CGF, Type, Arch);
if (!Ty)
return nullptr;
@@ -5710,7 +5790,8 @@ Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
+ const CallExpr *E,
+ llvm::Triple::ArchType Arch) {
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -6011,7 +6092,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_s32_f32:
case NEON::BI__builtin_neon_vcvtd_s64_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
@@ -6026,7 +6107,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
@@ -6453,7 +6534,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -6467,9 +6548,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops,
- /*never use addresses*/ Address::invalid(), Address::invalid());
+ /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
- if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
+ if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
return V;
unsigned Int;
@@ -6518,7 +6599,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+ NeonTypeFlags(NeonTypeFlags::Float64, false, true), Arch);
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
@@ -6547,7 +6628,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
+ case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
+ case NEON::BI__builtin_neon_vfmah_laneq_f16:
case NEON::BI__builtin_neon_vfmas_laneq_f32:
case NEON::BI__builtin_neon_vfmad_lane_f64:
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
@@ -6699,14 +6782,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad), Arch);
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
"unexpected vcvt_f64_f32 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));
return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
@@ -6714,7 +6797,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
assert(Type.getEltType() == NeonTypeFlags::Float32 &&
"unexpected vcvt_f32_f64 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));
return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
@@ -6722,18 +6805,25 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
+ case NEON::BI__builtin_neon_vcvt_s16_v:
+ case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
- case NEON::BI__builtin_neon_vcvtq_u64_v: {
+ case NEON::BI__builtin_neon_vcvtq_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_s16_v:
+ case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
if (usgn)
return Builder.CreateFPToUI(Ops[0], Ty);
return Builder.CreateFPToSI(Ops[0], Ty);
}
+ case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
+ case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
+ case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
@@ -6743,9 +6833,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
+ case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
+ case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
+ case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
+ case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
@@ -6755,9 +6849,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
+ case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
+ case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
+ case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
+ case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
@@ -6767,9 +6865,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
+ case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
+ case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
+ case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
+ case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
@@ -6792,7 +6894,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Quad = true;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+ NeonTypeFlags(NeonTypeFlags::Float64, false, Quad), Arch);
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
@@ -6824,7 +6926,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -6836,7 +6938,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -6848,7 +6950,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -6860,7 +6962,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -6942,6 +7044,24 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
+ case NEON::BI__builtin_neon_vmaxv_f16: {
+ Int = Intrinsic::aarch64_neon_fmaxv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vmaxvq_f16: {
+ Int = Intrinsic::aarch64_neon_fmaxv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
@@ -7014,6 +7134,60 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
+ case NEON::BI__builtin_neon_vminv_f16: {
+ Int = Intrinsic::aarch64_neon_fminv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vminvq_f16: {
+ Int = Intrinsic::aarch64_neon_fminv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vmaxnmv_f16: {
+ Int = Intrinsic::aarch64_neon_fmaxnmv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vmaxnmvq_f16: {
+ Int = Intrinsic::aarch64_neon_fmaxnmv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vminnmv_f16: {
+ Int = Intrinsic::aarch64_neon_fminnmv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
+ case NEON::BI__builtin_neon_vminnmvq_f16: {
+ Int = Intrinsic::aarch64_neon_fminnmv;
+ Ty = HalfTy;
+ VTy = llvm::VectorType::get(HalfTy, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
+ return Builder.CreateTrunc(Ops[0], HalfTy);
+ }
case NEON::BI__builtin_neon_vmul_n_f64: {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
@@ -7848,8 +8022,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
default: return nullptr;
case X86::BI_mm_prefetch: {
Value *Address = Ops[0];
- Value *RW = ConstantInt::get(Int32Ty, 0);
- Value *Locality = Ops[1];
+ ConstantInt *C = cast<ConstantInt>(Ops[1]);
+ Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
+ Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
Value *Data = ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index c3709bf2e447..38d7344572d3 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1929,7 +1929,7 @@ void CodeGenModule::ConstructAttributeList(
RetAttrs.addAttribute(llvm::Attribute::SExt);
else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs.addAttribute(llvm::Attribute::ZExt);
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2014,7 +2014,7 @@ void CodeGenModule::ConstructAttributeList(
else
Attrs.addAttribute(llvm::Attribute::ZExt);
}
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
Attrs.addAttribute(llvm::Attribute::Nest);
@@ -2758,6 +2758,12 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
+ if (FI.isNoReturn()) {
+ // Noreturn functions don't return.
+ EmitUnreachable(EndLoc);
+ return;
+ }
+
if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
// Naked functions don't have epilogues.
Builder.CreateUnreachable();
@@ -3718,7 +3724,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- llvm::Instruction **callOrInvoke) {
+ llvm::Instruction **callOrInvoke,
+ SourceLocation Loc) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
assert(Callee.isOrdinary());
@@ -4241,7 +4248,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
SRetPtr.getPointer());
- Builder.CreateUnreachable();
+ // Strip away the noreturn attribute to better diagnose unreachable UB.
+ if (SanOpts.has(SanitizerKind::Unreachable)) {
+ if (auto *F = CS.getCalledFunction())
+ F->removeFnAttr(llvm::Attribute::NoReturn);
+ CS.removeAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoReturn);
+ }
+
+ EmitUnreachable(Loc);
Builder.ClearInsertionPoint();
// FIXME: For now, emit a dummy basic block because expr emitters in
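
Taken together, the EmitFunctionEpilog and call-site hunks above make a noreturn function that actually returns diagnosable at run time instead of silently reaching a bare 'unreachable'. A minimal sketch of the behaviour (editor's example, not part of the patch), assuming it is compiled with -fsanitize=unreachable:

__attribute__((noreturn)) void fatal(bool really) {
  if (really)
    throw 1;
  // Falling off the end here is undefined; with the new epilog path the
  // function now ends in a UBSan "builtin unreachable" check rather than a
  // plain 'unreachable' instruction, so the runtime can report the location.
}

int main() {
  fatal(false); // At the call site the noreturn attribute is stripped under
                // -fsanitize=unreachable, so the check emitted after the call
                // can also fire instead of being optimized away.
}
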
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index caea41ec0e03..aeed4d658a4e 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -2653,7 +2653,6 @@ llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
// file where the type's definition is located, so it might be
// best to make this behavior a command line or debugger tuning
// option.
- FullSourceLoc Loc(D->getLocation(), CGM.getContext().getSourceManager());
if (Module *M = D->getOwningModule()) {
// This is a (sub-)module.
auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 6c9d9f170ace..1ec084ff3f5b 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -133,7 +133,7 @@ static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
case ObjCRuntime::GNUstep:
if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
return EHPersonality::GNUstep_ObjC;
- // fallthrough
+ LLVM_FALLTHROUGH;
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
if (L.SjLjExceptions)
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 98740e8f9aab..90eeddf5cc0b 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -3076,6 +3076,17 @@ void CodeGenFunction::EmitCfiCheckFail() {
CGM.addUsedGlobal(F);
}
+void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
+ if (SanOpts.has(SanitizerKind::Unreachable)) {
+ SanitizerScope SanScope(this);
+ EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
+ SanitizerKind::Unreachable),
+ SanitizerHandler::BuiltinUnreachable,
+ EmitCheckSourceLocation(Loc), None);
+ }
+ Builder.CreateUnreachable();
+}
+
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
llvm::BasicBlock *Cont = createBasicBlock("cont");
@@ -3790,8 +3801,10 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
FieldTBAAInfo.Offset +=
Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
- // Update the final access type.
+ // Update the final access type and size.
FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
+ FieldTBAAInfo.Size =
+ getContext().getTypeSizeInChars(FieldType).getQuantity();
}
Address addr = base.getAddress();
@@ -4616,7 +4629,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
Callee.setFunctionPointer(CalleePtr);
}
- return EmitCall(FnInfo, Callee, ReturnValue, Args);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr, E->getExprLoc());
}
LValue CodeGenFunction::
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 1ab8433864c4..0f05cab66d7e 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -692,7 +692,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
return Visit(E->getSubExpr());
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case CK_NoOp:
case CK_UserDefinedConversion:
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 41bb199ffde7..0749b0ac46a7 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -89,7 +89,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
*this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
- return EmitCall(FnInfo, Callee, ReturnValue, Args);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
+ CE ? CE->getExprLoc() : SourceLocation());
}
RValue CodeGenFunction::EmitCXXDestructorCall(
@@ -446,7 +447,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
EmitCallArgs(Args, FPT, E->arguments());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
/*PrefixSize=*/0),
- Callee, ReturnValue, Args);
+ Callee, ReturnValue, Args, nullptr, E->getExprLoc());
}
RValue
@@ -613,7 +614,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
case CXXConstructExpr::CK_VirtualBase:
ForVirtualBase = true;
- // fall-through
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_NonVirtualBase:
Type = Ctor_Base;
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index e860b3045f0e..9094d3f8a91c 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -761,15 +761,16 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
-
llvm::Value *DSTr, *DSTi;
if (LHSr->getType()->isFloatingPointTy()) {
- // If we have a complex operand on the RHS, we delegate to a libcall to
- // handle all of the complexities and minimize underflow/overflow cases.
+ // If we have a complex operand on the RHS and FastMath is not allowed, we
+ // delegate to a libcall to handle all of the complexities and minimize
+ // underflow/overflow cases. When FastMath is allowed we construct the
+ // divide inline using the same algorithm as for integer operands.
//
// FIXME: We would be able to avoid the libcall in many places if we
// supported imaginary types in addition to complex types.
- if (RHSi) {
+ if (RHSi && !CGF.getLangOpts().FastMath) {
BinOpInfo LibCallOp = Op;
// If LHS was a real, supply a null imaginary part.
if (!LHSi)
@@ -791,11 +792,31 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
case llvm::Type::FP128TyID:
return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
}
- }
- assert(LHSi && "Can have at most one non-complex operand!");
+ } else if (RHSi) {
+ if (!LHSi)
+ LHSi = llvm::Constant::getNullValue(RHSi->getType());
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *AC = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *BD = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *ACpBD = Builder.CreateFAdd(AC, BD); // ac+bd
+
+ llvm::Value *CC = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *DD = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *CCpDD = Builder.CreateFAdd(CC, DD); // cc+dd
- DSTr = Builder.CreateFDiv(LHSr, RHSr);
- DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ llvm::Value *BC = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *AD = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *BCmAD = Builder.CreateFSub(BC, AD); // bc-ad
+
+ DSTr = Builder.CreateFDiv(ACpBD, CCpDD);
+ DSTi = Builder.CreateFDiv(BCmAD, CCpDD);
+ } else {
+ assert(LHSi && "Can have at most one non-complex operand!");
+
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ }
} else {
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index ab5bbc03db95..cd62d00dfb53 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -3288,11 +3288,15 @@ public:
/// LLVM arguments and the types they were derived from.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::Instruction **callOrInvoke = nullptr);
-
+ llvm::Instruction **callOrInvoke, SourceLocation Loc);
+ RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
+ ReturnValueSlot ReturnValue, const CallArgList &Args,
+ llvm::Instruction **callOrInvoke = nullptr) {
+ return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
+ SourceLocation());
+ }
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
- ReturnValueSlot ReturnValue,
- llvm::Value *Chain = nullptr);
+ ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -3391,7 +3395,8 @@ public:
const llvm::CmpInst::Predicate Fp,
const llvm::CmpInst::Predicate Ip,
const llvm::Twine &Name = "");
- llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ llvm::Triple::ArchType Arch);
llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
unsigned LLVMIntrinsic,
@@ -3400,7 +3405,8 @@ public:
unsigned Modifier,
const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops,
- Address PtrOp0, Address PtrOp1);
+ Address PtrOp0, Address PtrOp1,
+ llvm::Triple::ArchType Arch);
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -3414,7 +3420,8 @@ public:
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
- llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ llvm::Triple::ArchType Arch);
llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -3747,6 +3754,10 @@ public:
llvm::ConstantInt *TypeId, llvm::Value *Ptr,
ArrayRef<llvm::Constant *> StaticArgs);
+ /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
+ /// checking is enabled. Otherwise, just emit an unreachable instruction.
+ void EmitUnreachable(SourceLocation Loc);
+
/// \brief Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index c59dc71da596..5bdf81aaf66e 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -103,6 +103,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ HalfTy = llvm::Type::getHalfTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
@@ -4289,7 +4290,11 @@ void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
}
void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
- for (const auto &Entry : DeferredEmptyCoverageMappingDecls) {
+ // We call takeVector() here to avoid use-after-free.
+ // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
+ // we deserialize function bodies to emit coverage info for them, and that
+ // deserializes more declarations. How should we handle that case?
+ for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
if (!Entry.second)
continue;
const Decl *D = Entry.first;
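
The takeVector() comment above is about the container being appended to while it is iterated. A stand-alone illustration of the hazard and of the snapshot fix (illustrative names only, using a plain std::vector instead of Clang's MapVector):

#include <utility>
#include <vector>

std::vector<int> Work = {1, 2, 3};

void process(int N) {
  if (N < 3)
    Work.push_back(N + 10); // may reallocate the buffer being iterated
}

void drainUnsafe() {
  for (int N : Work) // iterators are invalidated once process() reallocates
    process(N);
}

void drainSafe() {
  // Move the current contents into a local snapshot first, mirroring the
  // DeferredEmptyCoverageMappingDecls.takeVector() call in the hunk above.
  std::vector<int> Snapshot = std::move(Work);
  Work.clear();
  for (int N : Snapshot)
    process(N); // newly added entries land in Work for a later pass
}
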
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index f394ea288d46..ad473032db17 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -59,7 +59,10 @@ llvm::MDNode *CodeGenTBAA::getRoot() {
llvm::MDNode *CodeGenTBAA::createScalarTypeNode(StringRef Name,
llvm::MDNode *Parent,
uint64_t Size) {
- (void)Size; // TODO: Support generation of size-aware type nodes.
+ if (CodeGenOpts.NewStructPathTBAA) {
+ llvm::Metadata *Id = MDHelper.createString(Name);
+ return MDHelper.createTBAATypeNode(Parent, Size, Id);
+ }
return MDHelper.createTBAAScalarTypeNode(Name, Parent);
}
@@ -158,6 +161,10 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
if (Ty->isPointerType() || Ty->isReferenceType())
return createScalarTypeNode("any pointer", getChar(), Size);
+ // Accesses to arrays are accesses to objects of their element types.
+ if (CodeGenOpts.NewStructPathTBAA && Ty->isArrayType())
+ return getTypeInfo(cast<ArrayType>(Ty)->getElementType());
+
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
@@ -300,8 +307,12 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
OutName = RD->getName();
}
- // TODO: Support size-aware type nodes and create one here for the
- // given aggregate type.
+ if (CodeGenOpts.NewStructPathTBAA) {
+ llvm::MDNode *Parent = getChar();
+ uint64_t Size = Context.getTypeSizeInChars(Ty).getQuantity();
+ llvm::Metadata *Id = MDHelper.createString(OutName);
+ return MDHelper.createTBAATypeNode(Parent, Size, Id, Fields);
+ }
// Create the struct type node with a vector of pairs (offset, type).
SmallVector<std::pair<llvm::MDNode*, uint64_t>, 4> OffsetsAndTypes;
@@ -348,6 +359,10 @@ llvm::MDNode *CodeGenTBAA::getAccessTagInfo(TBAAAccessInfo Info) {
Info.BaseType = Info.AccessType;
assert(!Info.Offset && "Nonzero offset for an access with no base type!");
}
+ if (CodeGenOpts.NewStructPathTBAA) {
+ return N = MDHelper.createTBAAAccessTag(Info.BaseType, Info.AccessType,
+ Info.Offset, Info.Size);
+ }
return N = MDHelper.createTBAAStructTagNode(Info.BaseType, Info.AccessType,
Info.Offset);
}
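
For reference, a small example of what the size-aware ("new struct path") nodes record once CodeGenOpts.NewStructPathTBAA is enabled. The struct and the offsets below are the editor's own illustration and assume a typical 64-bit target:

struct S {
  int i;      // offset 0, size 4
  float f;    // offset 4, size 4
  short a[2]; // offset 8; per the new isArrayType() case, accesses to a[k]
              // are described with the element type 'short' (size 2)
};

float load_f(struct S *p) {
  // In the new format the access tag for this load carries
  // (base type = S, access type = float, offset = 4, size = 4)
  // rather than just (base, access, offset).
  return p->f;
}
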
diff --git a/lib/CodeGen/CodeGenTypeCache.h b/lib/CodeGen/CodeGenTypeCache.h
index 2af7b30eafb4..fb096ac89987 100644
--- a/lib/CodeGen/CodeGenTypeCache.h
+++ b/lib/CodeGen/CodeGenTypeCache.h
@@ -37,7 +37,7 @@ struct CodeGenTypeCache {
/// i8, i16, i32, and i64
llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
-  /// float, double
+  /// half, float, double
- llvm::Type *FloatTy, *DoubleTy;
+ llvm::Type *HalfTy, *FloatTy, *DoubleTy;
/// int
llvm::IntegerType *IntTy;
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index f617d8b4551e..6ba8892f3501 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -440,6 +440,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds &= ~Unrecoverable;
TrappingKinds &= Kinds;
+ RecoverableKinds &= ~TrappingKinds;
// Setup blacklist files.
// Add default blacklist from resource directory.
@@ -683,6 +684,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Sanitizers.Mask |= Kinds;
RecoverableSanitizers.Mask |= RecoverableKinds;
TrapSanitizers.Mask |= TrappingKinds;
+ assert(!(RecoverableKinds & TrappingKinds) &&
+ "Overlap between recoverable and trapping sanitizers");
}
static std::string toString(const clang::SanitizerSet &Sanitizers) {
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 0a89ff96d3c8..7b3f4bc9d872 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -4389,6 +4389,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fapple-pragma-pack");
if (Args.hasFlag(options::OPT_fsave_optimization_record,
+ options::OPT_foptimization_record_file_EQ,
options::OPT_fno_save_optimization_record, false)) {
CmdArgs.push_back("-opt-record-file");
diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp
index ab51a8c3cc90..60f96d03c9c8 100644
--- a/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/lib/Driver/ToolChains/CommonArgs.cpp
@@ -549,6 +549,12 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
TC.getTriple().getOS() != llvm::Triple::NetBSD &&
TC.getTriple().getOS() != llvm::Triple::RTEMS)
CmdArgs.push_back("-ldl");
+ // Required for forkpty on some OSes
+ if (TC.getTriple().getOS() == llvm::Triple::NetBSD)
+ CmdArgs.push_back("-lutil");
+ // Required for backtrace on some OSes
+ if (TC.getTriple().getOS() == llvm::Triple::NetBSD)
+ CmdArgs.push_back("-lexecinfo");
}
static void
diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp
index 28efa86538ed..289f4ed92f6c 100644
--- a/lib/Driver/ToolChains/Darwin.cpp
+++ b/lib/Driver/ToolChains/Darwin.cpp
@@ -1181,9 +1181,12 @@ struct DarwinPlatform {
};
using DarwinPlatformKind = Darwin::DarwinPlatformKind;
+ using DarwinEnvironmentKind = Darwin::DarwinEnvironmentKind;
DarwinPlatformKind getPlatform() const { return Platform; }
+ DarwinEnvironmentKind getEnvironment() const { return Environment; }
+
StringRef getOSVersion() const {
if (Kind == OSVersionArg)
return Argument->getValue();
@@ -1233,6 +1236,19 @@ struct DarwinPlatform {
llvm_unreachable("Unsupported Darwin Source Kind");
}
+ static DarwinPlatform createFromTarget(llvm::Triple::OSType OS,
+ StringRef OSVersion, Arg *A,
+ llvm::Triple::EnvironmentType Env) {
+ DarwinPlatform Result(TargetArg, getPlatformFromOS(OS), OSVersion, A);
+ switch (Env) {
+ case llvm::Triple::Simulator:
+ Result.Environment = DarwinEnvironmentKind::Simulator;
+ break;
+ default:
+ break;
+ }
+ return Result;
+ }
static DarwinPlatform createOSVersionArg(DarwinPlatformKind Platform,
Arg *A) {
return DarwinPlatform(OSVersionArg, Platform, A);
@@ -1250,35 +1266,35 @@ struct DarwinPlatform {
}
static DarwinPlatform createFromArch(llvm::Triple::OSType OS,
StringRef Value) {
- DarwinPlatformKind Platform;
+ return DarwinPlatform(InferredFromArch, getPlatformFromOS(OS), Value);
+ }
+
+private:
+ DarwinPlatform(SourceKind Kind, DarwinPlatformKind Platform, Arg *Argument)
+ : Kind(Kind), Platform(Platform), Argument(Argument) {}
+ DarwinPlatform(SourceKind Kind, DarwinPlatformKind Platform, StringRef Value,
+ Arg *Argument = nullptr)
+ : Kind(Kind), Platform(Platform), OSVersion(Value), Argument(Argument) {}
+
+ static DarwinPlatformKind getPlatformFromOS(llvm::Triple::OSType OS) {
switch (OS) {
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
- Platform = DarwinPlatformKind::MacOS;
- break;
+ return DarwinPlatformKind::MacOS;
case llvm::Triple::IOS:
- Platform = DarwinPlatformKind::IPhoneOS;
- break;
+ return DarwinPlatformKind::IPhoneOS;
case llvm::Triple::TvOS:
- Platform = DarwinPlatformKind::TvOS;
- break;
+ return DarwinPlatformKind::TvOS;
case llvm::Triple::WatchOS:
- Platform = DarwinPlatformKind::WatchOS;
- break;
+ return DarwinPlatformKind::WatchOS;
default:
llvm_unreachable("Unable to infer Darwin variant");
}
- return DarwinPlatform(InferredFromArch, Platform, Value);
}
-private:
- DarwinPlatform(SourceKind Kind, DarwinPlatformKind Platform, Arg *Argument)
- : Kind(Kind), Platform(Platform), Argument(Argument) {}
- DarwinPlatform(SourceKind Kind, DarwinPlatformKind Platform, StringRef Value)
- : Kind(Kind), Platform(Platform), OSVersion(Value), Argument(nullptr) {}
-
SourceKind Kind;
DarwinPlatformKind Platform;
+ DarwinEnvironmentKind Environment = DarwinEnvironmentKind::NativeEnvironment;
std::string OSVersion;
Arg *Argument;
StringRef EnvVarName;
@@ -1449,20 +1465,15 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
const Driver &TheDriver) {
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
- // Set the OSTy based on -target if -arch isn't present.
- if (Args.hasArg(options::OPT_target) && !Args.hasArg(options::OPT_arch)) {
- OSTy = Triple.getOS();
- } else {
- StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
- MachOArchName == "arm64")
- OSTy = llvm::Triple::IOS;
- else if (MachOArchName == "armv7k")
- OSTy = llvm::Triple::WatchOS;
- else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
- MachOArchName != "armv7em")
- OSTy = llvm::Triple::MacOSX;
- }
+ StringRef MachOArchName = Toolchain.getMachOArchName(Args);
+ if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
+ MachOArchName == "arm64")
+ OSTy = llvm::Triple::IOS;
+ else if (MachOArchName == "armv7k")
+ OSTy = llvm::Triple::WatchOS;
+ else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
+ MachOArchName != "armv7em")
+ OSTy = llvm::Triple::MacOSX;
if (OSTy == llvm::Triple::UnknownOS)
return None;
@@ -1470,6 +1481,20 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
getOSVersion(OSTy, Triple, TheDriver));
}
+/// Returns the deployment target that's specified using the -target option.
+Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
+ DerivedArgList &Args, const llvm::Triple &Triple, const Driver &TheDriver) {
+ if (!Args.hasArg(options::OPT_target))
+ return None;
+ if (Triple.getOS() == llvm::Triple::Darwin ||
+ Triple.getOS() == llvm::Triple::UnknownOS)
+ return None;
+ std::string OSVersion = getOSVersion(Triple.getOS(), Triple, TheDriver);
+ return DarwinPlatform::createFromTarget(Triple.getOS(), OSVersion,
+ Args.getLastArg(options::OPT_target),
+ Triple.getEnvironment());
+}
+
} // namespace
void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
@@ -1494,24 +1519,52 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
- // The OS target can be specified using the -m<os>version-min argument.
+ // The OS and the version can be specified using the -target argument.
Optional<DarwinPlatform> OSTarget =
- getDeploymentTargetFromOSVersionArg(Args, getDriver());
- // If no deployment target was specified on the command line, check for
- // environment defines.
- if (!OSTarget)
- OSTarget =
- getDeploymentTargetFromEnvironmentVariables(getDriver(), getTriple());
- // If there is no command-line argument to specify the Target version and
- // no environment variable defined, see if we can set the default based
- // on -isysroot.
- if (!OSTarget)
- OSTarget = inferDeploymentTargetFromSDK(Args);
- // If no OS targets have been specified, try to guess platform from -target
- // or arch name and compute the version from the triple.
- if (!OSTarget)
- OSTarget =
- inferDeploymentTargetFromArch(Args, *this, getTriple(), getDriver());
+ getDeploymentTargetFromTargetArg(Args, getTriple(), getDriver());
+ if (OSTarget) {
+ Optional<DarwinPlatform> OSVersionArgTarget =
+ getDeploymentTargetFromOSVersionArg(Args, getDriver());
+ if (OSVersionArgTarget) {
+ unsigned TargetMajor, TargetMinor, TargetMicro;
+ bool TargetExtra;
+ unsigned ArgMajor, ArgMinor, ArgMicro;
+ bool ArgExtra;
+ if (OSTarget->getPlatform() != OSVersionArgTarget->getPlatform() ||
+ (Driver::GetReleaseVersion(OSTarget->getOSVersion(), TargetMajor,
+ TargetMinor, TargetMicro, TargetExtra) &&
+ Driver::GetReleaseVersion(OSVersionArgTarget->getOSVersion(),
+ ArgMajor, ArgMinor, ArgMicro, ArgExtra) &&
+ (VersionTuple(TargetMajor, TargetMinor, TargetMicro) !=
+ VersionTuple(ArgMajor, ArgMinor, ArgMicro) ||
+ TargetExtra != ArgExtra))) {
+ // Warn about -m<os>-version-min that doesn't match the OS version
+ // that's specified in the target.
+ std::string OSVersionArg = OSVersionArgTarget->getAsString(Args, Opts);
+ std::string TargetArg = OSTarget->getAsString(Args, Opts);
+ getDriver().Diag(clang::diag::warn_drv_overriding_flag_option)
+ << OSVersionArg << TargetArg;
+ }
+ }
+ } else {
+ // The OS target can be specified using the -m<os>version-min argument.
+ OSTarget = getDeploymentTargetFromOSVersionArg(Args, getDriver());
+ // If no deployment target was specified on the command line, check for
+ // environment defines.
+ if (!OSTarget)
+ OSTarget =
+ getDeploymentTargetFromEnvironmentVariables(getDriver(), getTriple());
+ // If there is no command-line argument to specify the Target version and
+ // no environment variable defined, see if we can set the default based
+ // on -isysroot.
+ if (!OSTarget)
+ OSTarget = inferDeploymentTargetFromSDK(Args);
+ // If no OS targets have been specified, try to guess platform from -target
+ // or arch name and compute the version from the triple.
+ if (!OSTarget)
+ OSTarget =
+ inferDeploymentTargetFromArch(Args, *this, getTriple(), getDriver());
+ }
assert(OSTarget && "Unable to infer Darwin variant");
OSTarget->addOSVersionMinArgument(Args, Opts);
@@ -1562,10 +1615,11 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
} else
llvm_unreachable("unknown kind of Darwin platform");
- DarwinEnvironmentKind Environment = NativeEnvironment;
+ DarwinEnvironmentKind Environment = OSTarget->getEnvironment();
// Recognize iOS targets with an x86 architecture as the iOS simulator.
- if (Platform != MacOS && (getTriple().getArch() == llvm::Triple::x86 ||
- getTriple().getArch() == llvm::Triple::x86_64))
+ if (Environment == NativeEnvironment && Platform != MacOS &&
+ (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
Environment = Simulator;
setTarget(Platform, Environment, Major, Minor, Micro);
diff --git a/lib/Driver/ToolChains/Fuchsia.cpp b/lib/Driver/ToolChains/Fuchsia.cpp
index 10ee7b7829be..269d34d18f1e 100644
--- a/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/lib/Driver/ToolChains/Fuchsia.cpp
@@ -280,5 +280,6 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Scudo;
return Res;
}
diff --git a/lib/Driver/ToolChains/Myriad.cpp b/lib/Driver/ToolChains/Myriad.cpp
index 6fdb5a2248dd..06079b109dd1 100644
--- a/lib/Driver/ToolChains/Myriad.cpp
+++ b/lib/Driver/ToolChains/Myriad.cpp
@@ -199,7 +199,7 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
std::string Exec =
- Args.MakeArgString(TC.GetProgramPath("sparc-myriad-elf-ld"));
+ Args.MakeArgString(TC.GetProgramPath("sparc-myriad-rtems-ld"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
CmdArgs, Inputs));
}
@@ -218,10 +218,11 @@ MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
D.Diag(clang::diag::err_target_unsupported_arch)
<< Triple.getArchName() << "myriad";
LLVM_FALLTHROUGH;
+ case llvm::Triple::shave:
+ return;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
- case llvm::Triple::shave:
- GCCInstallation.init(Triple, Args, {"sparc-myriad-elf"});
+ GCCInstallation.init(Triple, Args, {"sparc-myriad-rtems"});
}
if (GCCInstallation.isValid()) {
@@ -231,7 +232,7 @@ MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
addPathIfExists(D, CompilerSupportDir, getFilePaths());
}
// libstd++ and libc++ must both be found in this one place.
- addPathIfExists(D, D.Dir + "/../sparc-myriad-elf/lib", getFilePaths());
+ addPathIfExists(D, D.Dir + "/../sparc-myriad-rtems/lib", getFilePaths());
}
MyriadToolChain::~MyriadToolChain() {}
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 32f1232bbe24..7208177aa012 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -1854,6 +1854,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Verify that the rest of the module path actually corresponds to
// a submodule.
+ bool MapPrivateSubModToTopLevel = false;
if (!getLangOpts().ModulesTS && Path.size() > 1) {
for (unsigned I = 1, N = Path.size(); I != N; ++I) {
StringRef Name = Path[I].first->getName();
@@ -1892,7 +1893,40 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Sub = Module->findSubmodule(Best[0]);
}
}
-
+
+ // If the user is requesting Foo.Private and it doesn't exist, try to
+  // match Foo_Private and emit a warning asking the user to write
+ // @import Foo_Private instead. FIXME: remove this when existing clients
+ // migrate off of Foo.Private syntax.
+ if (!Sub && PP->getLangOpts().ImplicitModules && Name == "Private" &&
+ Module == Module->getTopLevelModule()) {
+ SmallString<128> PrivateModule(Module->Name);
+ PrivateModule.append("_Private");
+
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> PrivPath;
+ auto &II = PP->getIdentifierTable().get(
+ PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
+ PrivPath.push_back(std::make_pair(&II, Path[0].second));
+
+ if (PP->getHeaderSearchInfo().lookupModule(PrivateModule))
+ Sub =
+ loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
+ if (Sub) {
+ MapPrivateSubModToTopLevel = true;
+ if (!getDiagnostics().isIgnored(
+ diag::warn_no_priv_submodule_use_toplevel, ImportLoc)) {
+ getDiagnostics().Report(Path[I].second,
+ diag::warn_no_priv_submodule_use_toplevel)
+ << Path[I].first << Module->getFullModuleName() << PrivateModule
+ << SourceRange(Path[0].second, Path[I].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[0].second),
+ PrivateModule);
+ getDiagnostics().Report(Sub->DefinitionLoc,
+ diag::note_private_top_level_defined);
+ }
+ }
+ }
+
if (!Sub) {
// No submodule by this name. Complain, and don't look for further
// submodules.
@@ -1909,7 +1943,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Make the named module visible, if it's not already part of the module
// we are parsing.
if (ModuleName != getLangOpts().CurrentModule) {
- if (!Module->IsFromModuleFile) {
+ if (!Module->IsFromModuleFile && !MapPrivateSubModToTopLevel) {
// We have an umbrella header or directory that doesn't actually include
// all of the headers within the directory it covers. Complain about
// this missing submodule and recover by forgetting that we ever saw
diff --git a/lib/Frontend/PrecompiledPreamble.cpp b/lib/Frontend/PrecompiledPreamble.cpp
index f6964d02b237..7e1323fd83bb 100644
--- a/lib/Frontend/PrecompiledPreamble.cpp
+++ b/lib/Frontend/PrecompiledPreamble.cpp
@@ -30,7 +30,7 @@
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Process.h"
-
+#include <limits>
#include <utility>
using namespace clang;
@@ -333,6 +333,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
std::unique_ptr<PrecompilePreambleAction> Act;
Act.reset(new PrecompilePreambleAction(
StoreInMemory ? &Storage.asMemory().Data : nullptr, Callbacks));
+ Callbacks.BeforeExecute(*Clang);
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
return BuildPreambleError::BeginSourceFileFailed;
@@ -380,6 +381,27 @@ PreambleBounds PrecompiledPreamble::getBounds() const {
return PreambleBounds(PreambleBytes.size(), PreambleEndsAtStartOfLine);
}
+std::size_t PrecompiledPreamble::getSize() const {
+ switch (Storage.getKind()) {
+ case PCHStorage::Kind::Empty:
+ assert(false && "Calling getSize() on invalid PrecompiledPreamble. "
+ "Was it std::moved?");
+ return 0;
+ case PCHStorage::Kind::InMemory:
+ return Storage.asMemory().Data.size();
+ case PCHStorage::Kind::TempFile: {
+ uint64_t Result;
+ if (llvm::sys::fs::file_size(Storage.asFile().getFilePath(), Result))
+ return 0;
+
+ assert(Result <= std::numeric_limits<std::size_t>::max() &&
+ "file size did not fit into size_t");
+ return Result;
+ }
+ }
+ llvm_unreachable("Unhandled storage kind");
+}
+
bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer,
PreambleBounds Bounds,
@@ -505,8 +527,8 @@ PrecompiledPreamble::TempPCHFile::createInSystemTempDir(const Twine &Prefix,
StringRef Suffix) {
llvm::SmallString<64> File;
// Using a version of createTemporaryFile with a file descriptor guarantees
- // that we would never get a race condition in a multi-threaded setting (i.e.,
- // multiple threads getting the same temporary path).
+ // that we would never get a race condition in a multi-threaded setting
+ // (i.e., multiple threads getting the same temporary path).
int FD;
auto EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, FD, File);
if (EC)
@@ -694,6 +716,7 @@ void PrecompiledPreamble::setupPreambleStorage(
}
}
+void PreambleCallbacks::BeforeExecute(CompilerInstance &CI) {}
void PreambleCallbacks::AfterExecute(CompilerInstance &CI) {}
void PreambleCallbacks::AfterPCHEmitted(ASTWriter &Writer) {}
void PreambleCallbacks::HandleTopLevelDecl(DeclGroupRef DG) {}
diff --git a/lib/Headers/__clang_cuda_intrinsics.h b/lib/Headers/__clang_cuda_intrinsics.h
index 02d68a2e618e..1794eb3dc1d6 100644
--- a/lib/Headers/__clang_cuda_intrinsics.h
+++ b/lib/Headers/__clang_cuda_intrinsics.h
@@ -34,23 +34,24 @@
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
#pragma push_macro("__MAKE_SHUFFLES")
-#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask) \
- inline __device__ int __FnName(int __val, int __offset, \
+#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask, \
+ __Type) \
+ inline __device__ int __FnName(int __val, __Type __offset, \
int __width = warpSize) { \
return __IntIntrinsic(__val, __offset, \
((warpSize - __width) << 8) | (__Mask)); \
} \
- inline __device__ float __FnName(float __val, int __offset, \
+ inline __device__ float __FnName(float __val, __Type __offset, \
int __width = warpSize) { \
return __FloatIntrinsic(__val, __offset, \
((warpSize - __width) << 8) | (__Mask)); \
} \
- inline __device__ unsigned int __FnName(unsigned int __val, int __offset, \
+ inline __device__ unsigned int __FnName(unsigned int __val, __Type __offset, \
int __width = warpSize) { \
return static_cast<unsigned int>( \
::__FnName(static_cast<int>(__val), __offset, __width)); \
} \
- inline __device__ long long __FnName(long long __val, int __offset, \
+ inline __device__ long long __FnName(long long __val, __Type __offset, \
int __width = warpSize) { \
struct __Bits { \
int __a, __b; \
@@ -65,12 +66,29 @@
memcpy(&__ret, &__tmp, sizeof(__tmp)); \
return __ret; \
} \
+ inline __device__ long __FnName(long __val, __Type __offset, \
+ int __width = warpSize) { \
+ _Static_assert(sizeof(long) == sizeof(long long) || \
+ sizeof(long) == sizeof(int)); \
+ if (sizeof(long) == sizeof(long long)) { \
+ return static_cast<long>( \
+ ::__FnName(static_cast<long long>(__val), __offset, __width)); \
+ } else if (sizeof(long) == sizeof(int)) { \
+ return static_cast<long>( \
+ ::__FnName(static_cast<int>(__val), __offset, __width)); \
+ } \
+ } \
+ inline __device__ unsigned long __FnName( \
+ unsigned long __val, __Type __offset, int __width = warpSize) { \
+ return static_cast<unsigned long>( \
+ ::__FnName(static_cast<long>(__val), __offset, __width)); \
+ } \
inline __device__ unsigned long long __FnName( \
- unsigned long long __val, int __offset, int __width = warpSize) { \
+ unsigned long long __val, __Type __offset, int __width = warpSize) { \
return static_cast<unsigned long long>(::__FnName( \
static_cast<unsigned long long>(__val), __offset, __width)); \
} \
- inline __device__ double __FnName(double __val, int __offset, \
+ inline __device__ double __FnName(double __val, __Type __offset, \
int __width = warpSize) { \
long long __tmp; \
_Static_assert(sizeof(__tmp) == sizeof(__val)); \
@@ -81,13 +99,15 @@
return __ret; \
}
-__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f);
+__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f, int);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
-__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0);
-__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f);
-__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
-
+__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0,
+ unsigned int);
+__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f,
+ unsigned int);
+__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
+ int);
#pragma pop_macro("__MAKE_SHUFFLES")
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
@@ -97,25 +117,26 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
// __shfl_sync_* variants available in CUDA-9
#pragma push_macro("__MAKE_SYNC_SHUFFLES")
#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, \
- __Mask) \
- inline __device__ int __FnName(unsigned int __mask, int __val, int __offset, \
- int __width = warpSize) { \
+ __Mask, __Type) \
+ inline __device__ int __FnName(unsigned int __mask, int __val, \
+ __Type __offset, int __width = warpSize) { \
return __IntIntrinsic(__mask, __val, __offset, \
((warpSize - __width) << 8) | (__Mask)); \
} \
inline __device__ float __FnName(unsigned int __mask, float __val, \
- int __offset, int __width = warpSize) { \
+ __Type __offset, int __width = warpSize) { \
return __FloatIntrinsic(__mask, __val, __offset, \
((warpSize - __width) << 8) | (__Mask)); \
} \
inline __device__ unsigned int __FnName(unsigned int __mask, \
- unsigned int __val, int __offset, \
+ unsigned int __val, __Type __offset, \
int __width = warpSize) { \
return static_cast<unsigned int>( \
::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \
} \
inline __device__ long long __FnName(unsigned int __mask, long long __val, \
- int __offset, int __width = warpSize) { \
+ __Type __offset, \
+ int __width = warpSize) { \
struct __Bits { \
int __a, __b; \
}; \
@@ -130,13 +151,13 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
return __ret; \
} \
inline __device__ unsigned long long __FnName( \
- unsigned int __mask, unsigned long long __val, int __offset, \
+ unsigned int __mask, unsigned long long __val, __Type __offset, \
int __width = warpSize) { \
return static_cast<unsigned long long>(::__FnName( \
__mask, static_cast<unsigned long long>(__val), __offset, __width)); \
} \
inline __device__ long __FnName(unsigned int __mask, long __val, \
- int __offset, int __width = warpSize) { \
+ __Type __offset, int __width = warpSize) { \
_Static_assert(sizeof(long) == sizeof(long long) || \
sizeof(long) == sizeof(int)); \
if (sizeof(long) == sizeof(long long)) { \
@@ -147,14 +168,14 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \
} \
} \
- inline __device__ unsigned long __FnName(unsigned int __mask, \
- unsigned long __val, int __offset, \
- int __width = warpSize) { \
+ inline __device__ unsigned long __FnName( \
+ unsigned int __mask, unsigned long __val, __Type __offset, \
+ int __width = warpSize) { \
return static_cast<unsigned long>( \
::__FnName(__mask, static_cast<long>(__val), __offset, __width)); \
} \
inline __device__ double __FnName(unsigned int __mask, double __val, \
- int __offset, int __width = warpSize) { \
+ __Type __offset, int __width = warpSize) { \
long long __tmp; \
_Static_assert(sizeof(__tmp) == sizeof(__val)); \
memcpy(&__tmp, &__val, sizeof(__val)); \
@@ -164,15 +185,15 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);
return __ret; \
}
__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,
- __nvvm_shfl_sync_idx_f32, 0x1f);
+ __nvvm_shfl_sync_idx_f32, 0x1f, int);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,
- __nvvm_shfl_sync_up_f32, 0);
+ __nvvm_shfl_sync_up_f32, 0, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,
- __nvvm_shfl_sync_down_f32, 0x1f);
+ __nvvm_shfl_sync_down_f32, 0x1f, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,
- __nvvm_shfl_sync_bfly_f32, 0x1f);
+ __nvvm_shfl_sync_bfly_f32, 0x1f, int);
#pragma pop_macro("__MAKE_SYNC_SHUFFLES")
inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {
diff --git a/lib/Headers/cpuid.h b/lib/Headers/cpuid.h
index 2dd0add236b8..3ae90de0b91f 100644
--- a/lib/Headers/cpuid.h
+++ b/lib/Headers/cpuid.h
@@ -173,16 +173,24 @@
#define bit_AVX512VL 0x80000000
/* Features in %ecx for leaf 7 sub-leaf 0 */
-#define bit_PREFTCHWT1 0x00000001
-#define bit_AVX512VBMI 0x00000002
-#define bit_PKU 0x00000004
-#define bit_OSPKE 0x00000010
+#define bit_PREFTCHWT1 0x00000001
+#define bit_AVX512VBMI 0x00000002
+#define bit_PKU 0x00000004
+#define bit_OSPKE 0x00000010
+#define bit_AVX512VBMI2 0x00000040
+#define bit_SHSTK 0x00000080
+#define bit_GFNI 0x00000100
+#define bit_VAES 0x00000200
+#define bit_VPCLMULQDQ 0x00000400
+#define bit_AVX512VNNI 0x00000800
+#define bit_AVX512BITALG 0x00001000
#define bit_AVX512VPOPCNTDQ 0x00004000
-#define bit_RDPID 0x00400000
+#define bit_RDPID 0x00400000
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
+#define bit_IBT 0x00100000
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
@@ -192,6 +200,7 @@
/* Features in %ecx for leaf 0x80000001 */
#define bit_LAHF_LM 0x00000001
#define bit_ABM 0x00000020
+#define bit_LZCNT bit_ABM /* for gcc compat */
#define bit_SSE4a 0x00000040
#define bit_PRFCHW 0x00000100
#define bit_XOP 0x00000800
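
A short usage sketch for the newly defined leaf-7 %ecx bits (editor's example; it assumes an x86 host and relies only on the __get_cpuid/__cpuid_count helpers this header already provides):

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 7)
    return 1; // leaf 7 is not supported on this CPU
  __cpuid_count(7, 0, eax, ebx, ecx, edx);
  printf("AVX512VNNI: %d\n", (ecx & bit_AVX512VNNI) != 0);
  printf("VAES:       %d\n", (ecx & bit_VAES) != 0);
  return 0;
}
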
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index bbc2117b4ea1..279c0275d93f 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -2035,9 +2035,11 @@ _mm_storer_ps(float *__p, __m128 __a)
_mm_store_ps(__p, __a);
}
-#define _MM_HINT_T0 3
-#define _MM_HINT_T1 2
-#define _MM_HINT_T2 1
+#define _MM_HINT_ET0 7
+#define _MM_HINT_ET1 6
+#define _MM_HINT_T0 3
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 1
#define _MM_HINT_NTA 0
#ifndef _MSC_VER
@@ -2068,7 +2070,8 @@ _mm_storer_ps(float *__p, __m128 __a)
/// be generated. \n
/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
/// be generated.
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), \
+ ((sel) >> 2) & 1, (sel) & 0x3))
#endif
/// \brief Stores a 64-bit integer in the specified aligned memory location. To
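
The rewritten macro above forwards both a read/write bit and a locality level to __builtin_prefetch, which is what makes the new _MM_HINT_ET* values meaningful. A small decoding sketch (editor's example, not from the header):

#include <xmmintrin.h>

void warm(const float *p, float *q) {
  _mm_prefetch((const char *)p, _MM_HINT_T0);  // (3 >> 2) & 1 = 0 (read),  3 & 3 = 3
  _mm_prefetch((const char *)q, _MM_HINT_ET0); // (7 >> 2) & 1 = 1 (write), 7 & 3 = 3
  _mm_prefetch((const char *)p, _MM_HINT_NTA); // rw = 0, locality = 0
}
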
diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp
index 03db0cd53f7a..733d4dbc2f94 100644
--- a/lib/Index/IndexSymbol.cpp
+++ b/lib/Index/IndexSymbol.cpp
@@ -42,10 +42,10 @@ static bool isUnitTest(const ObjCMethodDecl *D) {
static void checkForIBOutlets(const Decl *D, SymbolPropertySet &PropSet) {
if (D->hasAttr<IBOutletAttr>()) {
- PropSet |= (unsigned)SymbolProperty::IBAnnotated;
+ PropSet |= (SymbolPropertySet)SymbolProperty::IBAnnotated;
} else if (D->hasAttr<IBOutletCollectionAttr>()) {
- PropSet |= (unsigned)SymbolProperty::IBAnnotated;
- PropSet |= (unsigned)SymbolProperty::IBOutletCollection;
+ PropSet |= (SymbolPropertySet)SymbolProperty::IBAnnotated;
+ PropSet |= (SymbolPropertySet)SymbolProperty::IBOutletCollection;
}
}
@@ -93,7 +93,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Lang = SymbolLanguage::C;
if (isFunctionLocalSymbol(D)) {
- Info.Properties |= (unsigned)SymbolProperty::Local;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Local;
}
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -118,17 +118,19 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (!CXXRec->isCLike()) {
Info.Lang = SymbolLanguage::CXX;
if (CXXRec->getDescribedClassTemplate()) {
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
}
}
}
if (isa<ClassTemplatePartialSpecializationDecl>(D)) {
- Info.Properties |= (unsigned)SymbolProperty::Generic;
- Info.Properties |= (unsigned)SymbolProperty::TemplatePartialSpecialization;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Properties |=
+ (SymbolPropertySet)SymbolProperty::TemplatePartialSpecialization;
} else if (isa<ClassTemplateSpecializationDecl>(D)) {
- Info.Properties |= (unsigned)SymbolProperty::Generic;
- Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Properties |=
+ (SymbolPropertySet)SymbolProperty::TemplateSpecialization;
}
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
@@ -142,15 +144,17 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (isa<VarTemplatePartialSpecializationDecl>(D)) {
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
- Info.Properties |= (unsigned)SymbolProperty::TemplatePartialSpecialization;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Properties |=
+ (SymbolPropertySet)SymbolProperty::TemplatePartialSpecialization;
} else if (isa<VarTemplateSpecializationDecl>(D)) {
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
- Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Properties |=
+ (SymbolPropertySet)SymbolProperty::TemplateSpecialization;
} else if (VD->getDescribedVarTemplate()) {
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
}
} else {
@@ -181,7 +185,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (!ClsD)
ClsD = cast<ObjCImplementationDecl>(D)->getClassInterface();
if (isUnitTestCase(ClsD))
- Info.Properties |= (unsigned)SymbolProperty::UnitTest;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::UnitTest;
break;
}
case Decl::ObjCProtocol:
@@ -198,7 +202,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
else
ClsD = cast<ObjCCategoryImplDecl>(D)->getClassInterface();
if (isUnitTestCase(ClsD))
- Info.Properties |= (unsigned)SymbolProperty::UnitTest;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::UnitTest;
break;
}
case Decl::ObjCMethod: {
@@ -212,9 +216,9 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
}
Info.Lang = SymbolLanguage::ObjC;
if (isUnitTest(MD))
- Info.Properties |= (unsigned)SymbolProperty::UnitTest;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::UnitTest;
if (D->hasAttr<IBActionAttr>())
- Info.Properties |= (unsigned)SymbolProperty::IBAnnotated;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::IBAnnotated;
break;
}
case Decl::ObjCProperty:
@@ -223,7 +227,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
checkForIBOutlets(D, Info.Properties);
if (auto *Annot = D->getAttr<AnnotateAttr>()) {
if (Annot->getAnnotation() == "gk_inspectable")
- Info.Properties |= (unsigned)SymbolProperty::GKInspectable;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::GKInspectable;
}
break;
case Decl::ObjCIvar:
@@ -268,12 +272,12 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
}
case Decl::ClassTemplate:
Info.Kind = SymbolKind::Class;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
Info.Lang = SymbolLanguage::CXX;
break;
case Decl::FunctionTemplate:
Info.Kind = SymbolKind::Function;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
Info.Lang = SymbolLanguage::CXX;
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(
cast<FunctionTemplateDecl>(D)->getTemplatedDecl())) {
@@ -294,7 +298,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::TypeAliasTemplate:
Info.Kind = SymbolKind::TypeAlias;
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
break;
case Decl::TypeAlias:
Info.Kind = SymbolKind::TypeAlias;
@@ -304,13 +308,13 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Kind = SymbolKind::Using;
Info.SubKind = SymbolSubKind::UsingTypename;
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
break;
case Decl::UnresolvedUsingValue:
Info.Kind = SymbolKind::Using;
Info.SubKind = SymbolSubKind::UsingValue;
Info.Lang = SymbolLanguage::CXX;
- Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
break;
case Decl::Binding:
Info.Kind = SymbolKind::Variable;
@@ -327,12 +331,13 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- Info.Properties |= (unsigned)SymbolProperty::Generic;
- Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Properties |=
+ (SymbolPropertySet)SymbolProperty::TemplateSpecialization;
}
}
- if (Info.Properties & (unsigned)SymbolProperty::Generic)
+ if (Info.Properties & (SymbolPropertySet)SymbolProperty::Generic)
Info.Lang = SymbolLanguage::CXX;
if (auto *attr = D->getExternalSourceSymbolAttr()) {
@@ -490,9 +495,9 @@ StringRef index::getSymbolLanguageString(SymbolLanguage K) {
void index::applyForEachSymbolProperty(SymbolPropertySet Props,
llvm::function_ref<void(SymbolProperty)> Fn) {
-#define APPLY_FOR_PROPERTY(K) \
- if (Props & (unsigned)SymbolProperty::K) \
- Fn(SymbolProperty::K)
+#define APPLY_FOR_PROPERTY(K) \
+ if (Props & (SymbolPropertySet)SymbolProperty::K) \
+ Fn(SymbolProperty::K)
APPLY_FOR_PROPERTY(Generic);
APPLY_FOR_PROPERTY(TemplatePartialSpecialization);
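
A plausible reading of this mechanical change is that the property set type need not coincide with unsigned, so the OR is written against the set's own type. As a generic illustration only (the enum and typedef below are hypothetical stand-ins, not Clang's actual definitions):

    #include <cstdint>

    enum class Flag : uint16_t { A = 1u << 0, B = 1u << 9 };
    using FlagSet = uint16_t;  // hypothetical stand-in for SymbolPropertySet

    void mark(FlagSet &Props) {
      // Same shape as: Info.Properties |= (SymbolPropertySet)SymbolProperty::...;
      Props |= (FlagSet)Flag::B;
    }
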
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index aa2588659ddf..6976294a2eaf 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -209,11 +209,14 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
// The facility for "private modules" -- adjacent, optional module maps named
// module.private.modulemap that are supposed to define private submodules --
- // is sometimes misused by frameworks that name their associated private
- // module FooPrivate, rather than as a submodule named Foo.Private as
- // intended. Here we compensate for such cases by looking in directories named
- // Foo.framework, when we previously looked and failed to find a
- // FooPrivate.framework.
+ // may have different flavors of names: FooPrivate, Foo_Private and Foo.Private.
+ //
+ // Foo.Private is now deprecated in favor of Foo_Private. Users of FooPrivate
+ // should also rename to Foo_Private. Representing private as submodules
+ // could force building unwanted dependencies into the parent module and cause
+ // dependency cycles.
+ if (!Module && SearchName.consume_back("_Private"))
+ Module = lookupModule(ModuleName, SearchName);
if (!Module && SearchName.consume_back("Private"))
Module = lookupModule(ModuleName, SearchName);
return Module;
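
A minimal sketch of the suffix fallback above, assuming only llvm::StringRef (the helper name is illustrative, not part of HeaderSearch): both the canonical Foo_Private spelling and the legacy FooPrivate spelling fall back to a lookup under the base framework name.

    #include "llvm/ADT/StringRef.h"
    #include <string>

    // Returns the name to retry the module lookup with, mirroring the order of
    // the consume_back() calls above ("_Private" is stripped before "Private").
    static std::string fallbackSearchName(llvm::StringRef SearchName) {
      if (!SearchName.consume_back("_Private"))
        SearchName.consume_back("Private");
      return SearchName.str();
    }
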
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index fbbae7a09520..b3ac10c5c5ae 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -1608,6 +1608,54 @@ namespace {
} // namespace
+/// Private modules are canonicalized as Foo_Private. Clang provides extra
+/// module map search logic to find the appropriate private module when PCH
+/// is used with implicit module maps. Warn when private modules are written
+/// in other ways (FooPrivate and Foo.Private), providing notes and fixits.
+static void diagnosePrivateModules(const ModuleMap &Map,
+ DiagnosticsEngine &Diags,
+ const Module *ActiveModule) {
+
+ auto GenNoteAndFixIt = [&](StringRef BadName, StringRef Canonical,
+ const Module *M) {
+ auto D = Diags.Report(ActiveModule->DefinitionLoc,
+ diag::note_mmap_rename_top_level_private_module);
+ D << BadName << M->Name;
+ D << FixItHint::CreateReplacement(ActiveModule->DefinitionLoc, Canonical);
+ };
+
+ for (auto E = Map.module_begin(); E != Map.module_end(); ++E) {
+ auto const *M = E->getValue();
+ if (M->Directory != ActiveModule->Directory)
+ continue;
+
+ SmallString<128> FullName(ActiveModule->getFullModuleName());
+ if (!FullName.startswith(M->Name) && !FullName.endswith("Private"))
+ continue;
+ SmallString<128> Canonical(M->Name);
+ Canonical.append("_Private");
+
+ // Foo.Private -> Foo_Private
+ if (ActiveModule->Parent && ActiveModule->Name == "Private" && !M->Parent &&
+ M->Name == ActiveModule->Parent->Name) {
+ Diags.Report(ActiveModule->DefinitionLoc,
+ diag::warn_mmap_mismatched_private_submodule)
+ << FullName;
+ GenNoteAndFixIt(FullName, Canonical, M);
+ continue;
+ }
+
+ // FooPrivate and whatnots -> Foo_Private
+ if (!ActiveModule->Parent && !M->Parent && M->Name != ActiveModule->Name &&
+ ActiveModule->Name != Canonical) {
+ Diags.Report(ActiveModule->DefinitionLoc,
+ diag::warn_mmap_mismatched_private_module_name)
+ << ActiveModule->Name;
+ GenNoteAndFixIt(ActiveModule->Name, Canonical, M);
+ }
+ }
+}
+
/// \brief Parse a module declaration.
///
/// module-declaration:
@@ -1791,41 +1839,21 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
- if (!ActiveModule->Parent) {
- StringRef MapFileName(ModuleMapFile->getName());
- if (MapFileName.endswith("module.private.modulemap") ||
- MapFileName.endswith("module_private.map")) {
- // Adding a top-level module from a private modulemap is likely a
- // user error; we check to see if there's another top-level module
- // defined in the non-private map in the same dir, and if so emit a
- // warning.
- for (auto E = Map.module_begin(); E != Map.module_end(); ++E) {
- auto const *M = E->getValue();
- if (!M->Parent &&
- M->Directory == ActiveModule->Directory &&
- M->Name != ActiveModule->Name) {
- Diags.Report(ActiveModule->DefinitionLoc,
- diag::warn_mmap_mismatched_top_level_private)
- << ActiveModule->Name << M->Name;
- // The pattern we're defending against here is typically due to
- // a module named FooPrivate which is supposed to be a submodule
- // called Foo.Private. Emit a fixit in that case.
- auto D =
- Diags.Report(ActiveModule->DefinitionLoc,
- diag::note_mmap_rename_top_level_private_as_submodule);
- D << ActiveModule->Name << M->Name;
- StringRef Bad(ActiveModule->Name);
- if (Bad.consume_back("Private")) {
- SmallString<128> Fixed = Bad;
- Fixed.append(".Private");
- D << FixItHint::CreateReplacement(ActiveModule->DefinitionLoc,
- Fixed);
- }
- break;
- }
- }
- }
- }
+
+ // Private modules named FooPrivate, Foo.Private, or similar are likely a
+ // user error; provide warnings, notes and fixits to direct users to use
+ // Foo_Private instead.
+ SourceLocation StartLoc =
+ SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+ StringRef MapFileName(ModuleMapFile->getName());
+ if (Map.HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
+ !Diags.isIgnored(diag::warn_mmap_mismatched_private_submodule,
+ StartLoc) &&
+ !Diags.isIgnored(diag::warn_mmap_mismatched_private_module_name,
+ StartLoc) &&
+ (MapFileName.endswith("module.private.modulemap") ||
+ MapFileName.endswith("module_private.map")))
+ diagnosePrivateModules(Map, Diags, ActiveModule);
bool Done = false;
do {
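
The canonical name suggested by the fix-it is formed exactly as in diagnosePrivateModules above; a minimal sketch (the helper is illustrative, not part of Clang):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"

    // "Foo" -> "Foo_Private"; this is the spelling the warning's fix-it suggests
    // in place of "FooPrivate" or "Foo.Private".
    static llvm::SmallString<128> canonicalPrivateName(llvm::StringRef TopLevel) {
      llvm::SmallString<128> Canonical(TopLevel);
      Canonical.append("_Private");
      return Canonical;
    }
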
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index c291a4b99d10..7d789e780113 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -420,10 +420,9 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
CodeCompletionFile = File;
CodeCompletionOffset = Position - Buffer->getBufferStart();
- std::unique_ptr<MemoryBuffer> NewBuffer =
- MemoryBuffer::getNewUninitMemBuffer(Buffer->getBufferSize() + 1,
- Buffer->getBufferIdentifier());
- char *NewBuf = const_cast<char*>(NewBuffer->getBufferStart());
+ auto NewBuffer = llvm::WritableMemoryBuffer::getNewUninitMemBuffer(
+ Buffer->getBufferSize() + 1, Buffer->getBufferIdentifier());
+ char *NewBuf = NewBuffer->getBufferStart();
char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
*NewPos = '\0';
std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
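
The replacement above relies on llvm::WritableMemoryBuffer, whose getBufferStart() returns a mutable pointer, so the const_cast disappears. A self-contained sketch of the same pattern (buffer size and name are arbitrary):

    #include "llvm/Support/MemoryBuffer.h"
    #include <cstring>

    void zeroScratchBuffer() {
      auto Buf = llvm::WritableMemoryBuffer::getNewUninitMemBuffer(
          /*Size=*/64, /*BufferName=*/"scratch");
      char *P = Buf->getBufferStart();  // mutable, no const_cast required
      std::memset(P, 0, Buf->getBufferSize());
    }
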
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 6df9df804fd7..56a16b9e0271 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -372,10 +372,10 @@ bool
Parser::ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams) {
while (1) {
- // FIXME: ParseTemplateParameter should probably just return a NamedDecl.
- if (Decl *TmpParam
+
+ if (NamedDecl *TmpParam
= ParseTemplateParameter(Depth, TemplateParams.size())) {
- TemplateParams.push_back(dyn_cast<NamedDecl>(TmpParam));
+ TemplateParams.push_back(TmpParam);
} else {
// If we failed to parse a template parameter, skip until we find
// a comma or closing brace.
@@ -480,7 +480,7 @@ bool Parser::isStartOfTemplateTypeParameter() {
/// 'class' ...[opt] identifier[opt]
/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
/// = id-expression
-Decl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
+NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
if (isStartOfTemplateTypeParameter())
return ParseTypeParameter(Depth, Position);
@@ -502,7 +502,7 @@ Decl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
/// 'class' identifier[opt] '=' type-id
/// 'typename' ...[opt][C++0x] identifier[opt]
/// 'typename' identifier[opt] '=' type-id
-Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
+NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
assert(Tok.isOneOf(tok::kw_class, tok::kw_typename) &&
"A type-parameter starts with 'class' or 'typename'");
@@ -564,7 +564,7 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
/// type-parameter-key:
/// 'class'
/// 'typename' [C++1z]
-Decl *
+NamedDecl *
Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
@@ -669,7 +669,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
/// template-parameter:
/// ...
/// parameter-declaration
-Decl *
+NamedDecl *
Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// Parse the declaration-specifiers (i.e., the type).
// FIXME: The type should probably be restricted in some way... Not all
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 23d1895e31b4..618c0179f100 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -342,6 +342,7 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter &R, FileID FID,
" .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
" .CodeRemovalHint { background-color:#de1010 }\n"
" .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
+ " .selected{ background-color:orange !important; }\n"
" table.simpletable {\n"
" padding: 5px;\n"
" font-size:12pt;\n"
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 542b65327b7d..3431ddcf70a2 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -562,7 +562,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
// Do nothing: Patterns can come with cursor kinds!
break;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case RK_Declaration: {
// Set the availability based on attributes.
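
This and the many similar hunks below replace "// Fall through" comments with the LLVM_FALLTHROUGH macro from llvm/Support/Compiler.h, which expands to a [[fallthrough]]-style attribute where available and keeps -Wimplicit-fallthrough quiet. A minimal sketch of the pattern:

    #include "llvm/Support/Compiler.h"

    int bucket(int Kind) {
      switch (Kind) {
      case 0:
        // Intentional fall-through, now stated with the macro instead of a comment.
        LLVM_FALLTHROUGH;
      case 1:
        return 1;
      default:
        return 0;
      }
    }
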
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 865aea9e2284..64fa2c34b238 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -323,7 +323,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
BuildScopeInformation(Var, ParentScope);
++StmtsToSkip;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Stmt::GotoStmtClass:
// Remember both what scope a goto is in as well as the fact that we have
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 94070bb9c9aa..803f87b3c568 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -2278,7 +2278,7 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
default:
return false;
case X86::BI_mm_prefetch:
- i = 1; l = 0; u = 3;
+ i = 1; l = 0; u = 7;
break;
case X86::BI__builtin_ia32_sha1rnds4:
case X86::BI__builtin_ia32_shuf_f32x4_256_mask:
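
Raising the upper bound of the immediate check from 3 to 7 lets the hint operand of _mm_prefetch carry a write-prefetch bit in addition to the locality level. A hedged usage sketch using plain integer hints; if the matching xmmintrin.h update provides named write-hint macros (e.g. _MM_HINT_ET0), those can be used instead of the raw value:

    #include <xmmintrin.h>

    void warm_cache(const char *P) {
      _mm_prefetch(P, _MM_HINT_T0);  // locality hint 3: previously the maximum
      _mm_prefetch(P, 7);            // accepted after this change (write hint + T0)
    }
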
@@ -3020,7 +3020,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
IsAddSub = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
@@ -8487,7 +8487,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
return IntRange(R.Width, /*NonNegative*/ true);
}
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case BO_ShlAssign:
return IntRange::forValueOfType(C, GetExprType(E));
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 834e149d1af4..9aed178763dc 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -1420,7 +1420,7 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("mutable"));
Results.AddResult(Result("virtual"));
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_ObjCInterface:
case Sema::PCC_ObjCImplementation:
@@ -1638,7 +1638,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_Class:
if (SemaRef.getLangOpts().CPlusPlus) {
@@ -1688,7 +1688,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result(Builder.TakeString()));
}
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index ec5ca6973568..a1fc725f8df4 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -10671,7 +10671,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// that has an in-class initializer, so we type-check this like
// a declaration.
//
- // Fall through
+ LLVM_FALLTHROUGH;
case VarDecl::DeclarationOnly:
// It's only a declaration.
@@ -12179,9 +12179,11 @@ bool Sema::canSkipFunctionBody(Decl *D) {
}
Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
- if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Decl))
+ if (!Decl)
+ return nullptr;
+ if (FunctionDecl *FD = Decl->getAsFunction())
FD->setHasSkippedBody();
- else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(Decl))
+ else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(Decl))
MD->setHasSkippedBody();
return Decl;
}
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 676d00357c96..21fe46ad9dd1 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -1844,12 +1844,6 @@ static void handleIFuncAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD << 1;
return;
}
- // FIXME: it should be handled as a target specific attribute.
- if (S.Context.getTargetInfo().getTriple().getObjectFormat() !=
- llvm::Triple::ELF) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
- return;
- }
D->addAttr(::new (S.Context) IFuncAttr(Attr.getRange(), S.Context, Str,
Attr.getAttributeSpellingListIndex()));
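
With the object-format guard removed, Sema no longer discards the ifunc attribute outright on non-ELF targets; per the deleted FIXME, that policy belongs in target-specific handling. For reference, a minimal ifunc sketch (names are illustrative; an ELF toolchain is assumed for the indirect function to actually resolve):

    static int twice_generic(int x) { return x + x; }

    extern "C" {
    // The string in the attribute must name the resolver's unmangled symbol.
    int (*resolve_twice(void))(int) { return twice_generic; }
    }

    int twice(int) __attribute__((ifunc("resolve_twice")));
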
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 96472a0a70fe..f2fb95c39163 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -8157,7 +8157,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
PastFunctionChunk = true;
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case DeclaratorChunk::Array:
NeedsTypedef = true;
extendRight(After, Chunk.getSourceRange());
@@ -12352,7 +12352,7 @@ static bool hasOneRealArgument(MultiExprArg Args) {
if (!Args[1]->isDefaultArgument())
return false;
- // fall through
+ LLVM_FALLTHROUGH;
case 1:
return !Args[0]->isDefaultArgument();
}
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 929806ac6bfa..4746355e9800 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -817,7 +817,7 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
E->getLocStart(), nullptr,
PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
<< Ty << CT);
- // Fall through.
+ LLVM_FALLTHROUGH;
case VAK_Valid:
if (Ty->isRecordType()) {
// This is unlikely to be what the user intended. If the class has a
@@ -2881,7 +2881,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
valueKind = VK_RValue;
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Decl::ImplicitParam:
case Decl::ParmVar: {
@@ -2978,7 +2978,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
valueKind = VK_LValue;
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Decl::CXXConversion:
case Decl::CXXDestructor:
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index 03ddcc0a3eca..dd516ea3b428 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -251,7 +251,7 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
case IMA_Field_Uneval_Context:
Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
<< R.getLookupNameInfo().getName();
- // Fall through.
+ LLVM_FALLTHROUGH;
case IMA_Static:
case IMA_Abstract:
case IMA_Mixed_StaticContext:
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index 6ed5047c35da..cd0c2c47ae4c 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -2981,6 +2981,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
case OMF_init:
if (Method)
checkInitMethod(Method, ReceiverType);
+ break;
case OMF_None:
case OMF_alloc:
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index e4789cdf53bf..011051da58e5 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -6212,7 +6212,7 @@ static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
if (Entity->getParent())
return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
Entity);
- // Fall through.
+ LLVM_FALLTHROUGH;
case InitializedEntity::EK_Delegating:
// We can reach this case for aggregate initialization in a constructor:
// struct A { int &&r; };
@@ -7656,7 +7656,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
}
- // Intentional fallthrough
+ LLVM_FALLTHROUGH;
case FK_NonConstLValueReferenceBindingToUnrelated:
S.Diag(Kind.getLocation(),
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index b34bb3388d71..0880b2d79060 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -1290,9 +1290,14 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
}
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
- IsByRef = !DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true);
+ IsByRef =
+ !DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true) &&
+ // If the variable is artificial and must be captured by value, capture
+ // it by value.
+ !(isa<OMPCapturedExprDecl>(D) && D->hasAttr<OMPCaptureKindAttr>());
}
// When passing data by copy, we need to make sure it fits the uintptr size
@@ -2321,10 +2326,11 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
ASTContext &C = S.getASTContext();
Expr *Init = AsExpression ? CaptureExpr : CaptureExpr->IgnoreImpCasts();
QualType Ty = Init->getType();
+ Attr *OMPCaptureKind = nullptr;
if (CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue()) {
- if (S.getLangOpts().CPlusPlus)
+ if (S.getLangOpts().CPlusPlus) {
Ty = C.getLValueReferenceType(Ty);
- else {
+ } else {
Ty = C.getPointerType(Ty);
ExprResult Res =
S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_AddrOf, Init);
@@ -2333,11 +2339,16 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
Init = Res.get();
}
WithInit = true;
+ } else if (AsExpression) {
+ // This variable must be captured by value.
+ OMPCaptureKind = OMPCaptureKindAttr::CreateImplicit(C, OMPC_unknown);
}
auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
CaptureExpr->getLocStart());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C, SourceRange()));
+ if (OMPCaptureKind)
+ CED->addAttr(OMPCaptureKind);
S.CurContext->addHiddenDecl(CED);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
@@ -2346,31 +2357,34 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit) {
OMPCapturedExprDecl *CD;
- if (auto *VD = S.IsOpenMPCapturedDecl(D))
+ if (auto *VD = S.IsOpenMPCapturedDecl(D)) {
CD = cast<OMPCapturedExprDecl>(VD);
- else
+ } else {
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
/*AsExpression=*/false);
+ }
return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
+ CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
if (!Ref) {
- auto *CD =
- buildCaptureDecl(S, &S.getASTContext().Idents.get(".capture_expr."),
- CaptureExpr, /*WithInit=*/true, /*AsExpression=*/true);
+ OMPCapturedExprDecl *CD = buildCaptureDecl(
+ S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
+ /*WithInit=*/true, /*AsExpression=*/true);
Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
ExprResult Res = Ref;
if (!S.getLangOpts().CPlusPlus &&
CaptureExpr->getObjectKind() == OK_Ordinary && CaptureExpr->isGLValue() &&
- Ref->getType()->isPointerType())
+ Ref->getType()->isPointerType()) {
Res = S.CreateBuiltinUnaryOp(CaptureExpr->getExprLoc(), UO_Deref, Ref);
- if (!Res.isUsable())
- return ExprError();
- return CaptureExpr->isGLValue() ? Res : S.DefaultLvalueConversion(Res.get());
+ if (!Res.isUsable())
+ return ExprError();
+ }
+ return S.DefaultLvalueConversion(Res.get());
}
namespace {
@@ -8117,12 +8131,13 @@ OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
if (Val.isInvalid())
return nullptr;
- ValExpr = MakeFullExpr(Val.get()).get();
+ ValExpr = Val.get();
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_if, NameModifier);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -8239,6 +8254,7 @@ OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_num_threads);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -8666,6 +8682,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
DSAStack->getCurrentDirective(), OMPC_schedule) !=
OMPD_unknown &&
!CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -11355,6 +11372,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_device);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -12378,6 +12396,7 @@ OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_num_teams);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -12404,6 +12423,7 @@ OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_thread_limit);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
@@ -12514,6 +12534,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
DSAStack->getCurrentDirective(), OMPC_dist_schedule) !=
OMPD_unknown &&
!CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
llvm::MapVector<Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
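
The repeated MakeFullExpr/tryBuildCapture changes in this file arrange for clause expressions (if, num_threads, schedule, device, num_teams, thread_limit, dist_schedule) to be evaluated once and captured by value for the outlined region. A hedged example of source that exercises this path (names are illustrative):

    extern int workload_size();

    void run() {
      int n = workload_size();
      // Both clause expressions are pre-computed and captured for the region.
    #pragma omp parallel if (n > 1024) num_threads(n / 256)
      {
        // parallel work
      }
    }
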
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 268be9430a56..2144845f4dd3 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -5790,7 +5790,7 @@ ExprResult Sema::PerformContextualImplicitConversion(
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
- // fall through 'OR_Deleted' case.
+ LLVM_FALLTHROUGH;
case OR_Deleted:
// We'll complain below about a non-integral condition type.
break;
@@ -8651,7 +8651,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Plus: // '+' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryPlusPointerOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_Minus: // '-' is either unary or binary
if (Args.size() == 1) {
@@ -8682,7 +8682,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_EqualEqual:
case OO_ExclaimEqual:
OpBuilder.addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_Less:
case OO_Greater:
@@ -8719,12 +8719,12 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Equal:
OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_PlusEqual:
case OO_MinusEqual:
OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_StarEqual:
case OO_SlashEqual:
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index c70a8ba8f126..d94cb0d0f485 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -792,7 +792,7 @@ static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S,
/// ParamNameLoc is the location of the parameter name (if any).
/// If the type parameter has a default argument, it will be added
/// later via ActOnTypeParameterDefault.
-Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
+NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
@@ -922,13 +922,67 @@ QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
return QualType();
}
-Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *Default) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ // Check that we have valid decl-specifiers specified.
+ auto CheckValidDeclSpecifiers = [this, &D] {
+ // C++ [temp.param]
+ // p1
+ // template-parameter:
+ // ...
+ // parameter-declaration
+ // p2
+ // ... A storage class shall not be specified in a template-parameter
+ // declaration.
+ // [dcl.typedef]p1:
+ // The typedef specifier [...] shall not be used in the decl-specifier-seq
+ // of a parameter-declaration
+ const DeclSpec &DS = D.getDeclSpec();
+ auto EmitDiag = [this](SourceLocation Loc) {
+ Diag(Loc, diag::err_invalid_decl_specifier_in_nontype_parm)
+ << FixItHint::CreateRemoval(Loc);
+ };
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified)
+ EmitDiag(DS.getStorageClassSpecLoc());
+
+ if (DS.getThreadStorageClassSpec() != TSCS_unspecified)
+ EmitDiag(DS.getThreadStorageClassSpecLoc());
+
+ // [dcl.inline]p1:
+ // The inline specifier can be applied only to the declaration or
+ // definition of a variable or function.
+
+ if (DS.isInlineSpecified())
+ EmitDiag(DS.getInlineSpecLoc());
+
+ // [dcl.constexpr]p1:
+ // The constexpr specifier shall be applied only to the definition of a
+ // variable or variable template or the declaration of a function or
+ // function template.
+
+ if (DS.isConstexprSpecified())
+ EmitDiag(DS.getConstexprSpecLoc());
+
+ // [dcl.fct.spec]p1:
+ // Function-specifiers can be used only in function declarations.
+
+ if (DS.isVirtualSpecified())
+ EmitDiag(DS.getVirtualSpecLoc());
+
+ if (DS.isExplicitSpecified())
+ EmitDiag(DS.getExplicitSpecLoc());
+
+ if (DS.isNoreturnSpecified())
+ EmitDiag(DS.getNoreturnSpecLoc());
+ };
+
+ CheckValidDeclSpecifiers();
+
if (TInfo->getType()->isUndeducedType()) {
Diag(D.getIdentifierLoc(),
diag::warn_cxx14_compat_template_nontype_parm_auto_type)
@@ -999,7 +1053,7 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
/// ActOnTemplateTemplateParameter - Called when a C++ template template
/// parameter (e.g. T in template <template \<typename> class T> class array)
/// has been parsed. S is the current scope.
-Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
+NamedDecl *Sema::ActOnTemplateTemplateParameter(Scope* S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
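
The CheckValidDeclSpecifiers lambda added above rejects storage classes and the other listed specifiers in a template-parameter declaration, emitting err_invalid_decl_specifier_in_nontype_parm with a removal fix-it. Illustrative inputs (the first two are intentionally ill-formed and now diagnosed):

    template <static int N> struct A;     // error: 'static' is not a valid specifier here
    template <constexpr int N> struct B;  // error: neither is 'constexpr'
    template <int N> struct C;            // OK
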
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 564692b03020..f8ee60251698 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -5324,7 +5324,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::InjectedClassName:
T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
- // fall through
+ LLVM_FALLTHROUGH;
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec
@@ -5431,6 +5431,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
MarkUsedTemplateParameters(Ctx,
cast<DeducedType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
+ break;
// None of these types have any template parameters in them.
case Type::Builtin:
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index f627f6017f38..1deb8638756b 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -1587,9 +1587,10 @@ static QualType adjustFunctionTypeForInstantiation(ASTContext &Context,
}
/// Normal class members are of more specific types and therefore
-/// don't make it here. This function serves two purposes:
+/// don't make it here. This function serves three purposes:
/// 1) instantiating function templates
/// 2) substituting friend declarations
+/// 3) substituting deduction guide declarations for nested class templates
Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
TemplateParameterList *TemplateParams) {
// Check whether there is already a function template specialization for
@@ -1650,16 +1651,19 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
TemplateArgs);
}
+ DeclarationNameInfo NameInfo
+ = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
+
FunctionDecl *Function;
if (auto *DGuide = dyn_cast<CXXDeductionGuideDecl>(D)) {
Function = CXXDeductionGuideDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(), DGuide->isExplicit(),
- D->getNameInfo(), T, TInfo, D->getSourceRange().getEnd());
+ NameInfo, T, TInfo, D->getSourceRange().getEnd());
if (DGuide->isCopyDeductionCandidate())
cast<CXXDeductionGuideDecl>(Function)->setIsCopyDeductionCandidate();
} else {
Function = FunctionDecl::Create(
- SemaRef.Context, DC, D->getInnerLocStart(), D->getNameInfo(), T, TInfo,
+ SemaRef.Context, DC, D->getInnerLocStart(), NameInfo, T, TInfo,
D->getCanonicalDecl()->getStorageClass(), D->isInlineSpecified(),
D->hasWrittenPrototype(), D->isConstexpr());
Function->setRangeEnd(D->getSourceRange().getEnd());
@@ -3855,7 +3859,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
// Note, we should never try to instantiate a deleted function template.
- assert((Pattern || PatternDecl->isDefaulted()) &&
+ assert((Pattern || PatternDecl->isDefaulted() ||
+ PatternDecl->hasSkippedBody()) &&
"unexpected kind of function template definition");
// C++1y [temp.explicit]p10:
@@ -3940,16 +3945,20 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
}
- // Instantiate the function body.
- StmtResult Body = SubstStmt(Pattern, TemplateArgs);
+ if (PatternDecl->hasSkippedBody()) {
+ ActOnSkippedFunctionBody(Function);
+ } else {
+ // Instantiate the function body.
+ StmtResult Body = SubstStmt(Pattern, TemplateArgs);
- if (Body.isInvalid())
- Function->setInvalidDecl();
+ if (Body.isInvalid())
+ Function->setInvalidDecl();
- // FIXME: finishing the function body while in an expression evaluation
- // context seems wrong. Investigate more.
- ActOnFinishFunctionBody(Function, Body.get(),
- /*IsInstantiation=*/true);
+ // FIXME: finishing the function body while in an expression evaluation
+ // context seems wrong. Investigate more.
+ ActOnFinishFunctionBody(Function, Body.get(),
+ /*IsInstantiation=*/true);
+ }
PerformDependentDiagnostics(PatternDecl, TemplateArgs);
@@ -5195,7 +5204,9 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
case TSK_ExplicitInstantiationDefinition:
// We only need an instantiation if the pending instantiation *is* the
// explicit instantiation.
- if (Var != Var->getMostRecentDecl()) continue;
+ if (Var != Var->getMostRecentDecl())
+ continue;
+ break;
case TSK_ImplicitInstantiation:
break;
}
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 2fffe8e17970..2530b766f5f7 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -367,7 +367,7 @@ static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator,
if (onlyBlockPointers)
continue;
- // fallthrough
+ LLVM_FALLTHROUGH;
case DeclaratorChunk::BlockPointer:
result = &ptrChunk;
@@ -1340,7 +1340,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
}
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
case DeclSpec::TST_int: {
if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
switch (DS.getTypeSpecWidth()) {
@@ -3137,10 +3137,15 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
(T->isRecordType() || T->isDependentType()) &&
D.getDeclSpec().getTypeQualifiers() == 0 && D.isFirstDeclarator();
+ bool StartsWithDeclaratorId = true;
for (auto &C : D.type_objects()) {
switch (C.Kind) {
- case DeclaratorChunk::Pointer:
case DeclaratorChunk::Paren:
+ if (&C == &Paren)
+ continue;
+ LLVM_FALLTHROUGH;
+ case DeclaratorChunk::Pointer:
+ StartsWithDeclaratorId = false;
continue;
case DeclaratorChunk::Array:
@@ -3154,18 +3159,25 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
// We assume that something like 'T (&x) = y;' is highly likely to not
// be intended to be a temporary object.
CouldBeTemporaryObject = false;
+ StartsWithDeclaratorId = false;
continue;
case DeclaratorChunk::Function:
// In a new-type-id, function chunks require parentheses.
if (D.getContext() == Declarator::CXXNewContext)
return;
- LLVM_FALLTHROUGH;
+ // FIXME: "A(f())" deserves a vexing-parse warning, not just a
+ // redundant-parens warning, but we don't know whether the function
+ // chunk was syntactically valid as an expression here.
+ CouldBeTemporaryObject = false;
+ continue;
+
case DeclaratorChunk::BlockPointer:
case DeclaratorChunk::MemberPointer:
case DeclaratorChunk::Pipe:
// These cannot appear in expressions.
CouldBeTemporaryObject = false;
+ StartsWithDeclaratorId = false;
continue;
}
}
@@ -3186,6 +3198,18 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
SourceRange ParenRange(Paren.Loc, Paren.EndLoc);
if (!CouldBeTemporaryObject) {
+ // If we have A (::B), the parentheses affect the meaning of the program.
+ // Suppress the warning in that case. Don't bother looking at the DeclSpec
+ // here: even (e.g.) "int ::x" is visually ambiguous even though it's
+ // formally unambiguous.
+ if (StartsWithDeclaratorId && D.getCXXScopeSpec().isValid()) {
+ for (NestedNameSpecifier *NNS = D.getCXXScopeSpec().getScopeRep(); NNS;
+ NNS = NNS->getPrefix()) {
+ if (NNS->getKind() == NestedNameSpecifier::Global)
+ return;
+ }
+ }
+
S.Diag(Paren.Loc, diag::warn_redundant_parens_around_declarator)
<< ParenRange << FixItHint::CreateRemoval(Paren.Loc)
<< FixItHint::CreateRemoval(Paren.EndLoc);
@@ -3890,7 +3914,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case Declarator::PrototypeContext:
case Declarator::TrailingReturnContext:
isFunctionOrMethod = true;
- // fallthrough
+ LLVM_FALLTHROUGH;
case Declarator::MemberContext:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
@@ -3904,7 +3928,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Declarator::FileContext:
case Declarator::KNRTypeListContext: {
@@ -4063,7 +4087,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case CAMN_InnerPointers:
if (NumPointersRemaining == 0)
break;
- // Fallthrough.
+ LLVM_FALLTHROUGH;
case CAMN_Yes:
checkNullabilityConsistency(S, pointerKind, pointerLoc, pointerEndLoc);
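
The warnAboutRedundantParens changes above track whether the parenthesized declarator begins with the declarator-id and suppress the warning when a global-scope qualifier is involved, since dropping the parentheses there changes how the name reads. A sketch of both outcomes under the redundant-parens warning (variable names are illustrative; diagnostic wording is paraphrased):

    extern int a, b;

    int (a);    // still warned: the parentheses around the declarator are redundant
    int (::b);  // no longer warned: without the parentheses this would read as the
                // visually ambiguous 'int ::b'
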
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 111ac4fcdaa4..4ed822e04f6c 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -9235,8 +9235,16 @@ void ASTReader::finishPendingActions() {
const FunctionDecl *Defn = nullptr;
if (!getContext().getLangOpts().Modules || !FD->hasBody(Defn)) {
FD->setLazyBody(PB->second);
- } else
- mergeDefinitionVisibility(const_cast<FunctionDecl*>(Defn), FD);
+ } else {
+ auto *NonConstDefn = const_cast<FunctionDecl*>(Defn);
+ mergeDefinitionVisibility(NonConstDefn, FD);
+
+ if (!FD->isLateTemplateParsed() &&
+ !NonConstDefn->isLateTemplateParsed() &&
+ FD->getODRHash() != NonConstDefn->getODRHash()) {
+ PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
+ }
+ }
continue;
}
@@ -9253,7 +9261,8 @@ void ASTReader::finishPendingActions() {
}
void ASTReader::diagnoseOdrViolations() {
- if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty())
+ if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty() &&
+ PendingFunctionOdrMergeFailures.empty())
return;
// Trigger the import of the full definition of each class that had any
@@ -9275,6 +9284,20 @@ void ASTReader::diagnoseOdrViolations() {
}
}
+ // Trigger the import of functions.
+ auto FunctionOdrMergeFailures = std::move(PendingFunctionOdrMergeFailures);
+ PendingFunctionOdrMergeFailures.clear();
+ for (auto &Merge : FunctionOdrMergeFailures) {
+ Merge.first->buildLookup();
+ Merge.first->decls_begin();
+ Merge.first->getBody();
+ for (auto &FD : Merge.second) {
+ FD->buildLookup();
+ FD->decls_begin();
+ FD->getBody();
+ }
+ }
+
// For each declaration from a merged context, check that the canonical
// definition of that context also contains a declaration of the same
// entity.
@@ -9357,13 +9380,35 @@ void ASTReader::diagnoseOdrViolations() {
}
}
- if (OdrMergeFailures.empty())
+ if (OdrMergeFailures.empty() && FunctionOdrMergeFailures.empty())
return;
// Ensure we don't accidentally recursively enter deserialization while
// we're producing our diagnostics.
Deserializing RecursionGuard(this);
+ // Common code for hashing helpers.
+ ODRHash Hash;
+ auto ComputeQualTypeODRHash = [&Hash](QualType Ty) {
+ Hash.clear();
+ Hash.AddQualType(Ty);
+ return Hash.CalculateHash();
+ };
+
+ auto ComputeODRHash = [&Hash](const Stmt *S) {
+ assert(S);
+ Hash.clear();
+ Hash.AddStmt(S);
+ return Hash.CalculateHash();
+ };
+
+ auto ComputeSubDeclODRHash = [&Hash](const Decl *D) {
+ assert(D);
+ Hash.clear();
+ Hash.AddSubDecl(D);
+ return Hash.CalculateHash();
+ };
+
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
// If we've already pointed out a specific problem with this class, don't
@@ -9411,13 +9456,6 @@ void ASTReader::diagnoseOdrViolations() {
<< SecondModule << Range << DiffType;
};
- ODRHash Hash;
- auto ComputeQualTypeODRHash = [&Hash](QualType Ty) {
- Hash.clear();
- Hash.AddQualType(Ty);
- return Hash.CalculateHash();
- };
-
unsigned FirstNumBases = FirstDD->NumBases;
unsigned FirstNumVBases = FirstDD->NumVBases;
unsigned SecondNumBases = SecondDD->NumBases;
@@ -9520,14 +9558,12 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTemplate && SecondTemplate) {
DeclHashes FirstTemplateHashes;
DeclHashes SecondTemplateHashes;
- ODRHash Hash;
auto PopulateTemplateParameterHashs =
- [&Hash](DeclHashes &Hashes, const ClassTemplateDecl *TD) {
+ [&ComputeSubDeclODRHash](DeclHashes &Hashes,
+ const ClassTemplateDecl *TD) {
for (auto *D : TD->getTemplateParameters()->asArray()) {
- Hash.clear();
- Hash.AddSubDecl(D);
- Hashes.emplace_back(D, Hash.CalculateHash());
+ Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
}
};
@@ -9696,18 +9732,15 @@ void ASTReader::diagnoseOdrViolations() {
DeclHashes FirstHashes;
DeclHashes SecondHashes;
- ODRHash Hash;
- auto PopulateHashes = [&Hash, FirstRecord](DeclHashes &Hashes,
- CXXRecordDecl *Record) {
+ auto PopulateHashes = [&ComputeSubDeclODRHash, FirstRecord](
+ DeclHashes &Hashes, CXXRecordDecl *Record) {
for (auto *D : Record->decls()) {
// Due to decl merging, the first CXXRecordDecl is the parent of
// Decls in both records.
if (!ODRHash::isWhitelistedDecl(D, FirstRecord))
continue;
- Hash.clear();
- Hash.AddSubDecl(D);
- Hashes.emplace_back(D, Hash.CalculateHash());
+ Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
}
};
PopulateHashes(FirstHashes, FirstRecord);
@@ -9901,19 +9934,6 @@ void ASTReader::diagnoseOdrViolations() {
<< SecondModule << Range << DiffType;
};
- auto ComputeODRHash = [&Hash](const Stmt* S) {
- assert(S);
- Hash.clear();
- Hash.AddStmt(S);
- return Hash.CalculateHash();
- };
-
- auto ComputeQualTypeODRHash = [&Hash](QualType Ty) {
- Hash.clear();
- Hash.AddQualType(Ty);
- return Hash.CalculateHash();
- };
-
switch (FirstDiffType) {
case Other:
case EndOfClass:
@@ -10488,6 +10508,160 @@ void ASTReader::diagnoseOdrViolations() {
<< Merge.first;
}
}
+
+ // Issue ODR failures diagnostics for functions.
+ for (auto &Merge : FunctionOdrMergeFailures) {
+ enum ODRFunctionDifference {
+ ReturnType,
+ ParameterName,
+ ParameterType,
+ ParameterSingleDefaultArgument,
+ ParameterDifferentDefaultArgument,
+ FunctionBody,
+ };
+
+ FunctionDecl *FirstFunction = Merge.first;
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstFunction);
+
+ bool Diagnosed = false;
+ for (auto &SecondFunction : Merge.second) {
+
+ if (FirstFunction == SecondFunction)
+ continue;
+
+ std::string SecondModule =
+ getOwningModuleNameForDiagnostic(SecondFunction);
+
+ auto ODRDiagError = [FirstFunction, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRFunctionDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_function)
+ << FirstFunction << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto ODRDiagNote = [&SecondModule, this](SourceLocation Loc,
+ SourceRange Range,
+ ODRFunctionDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_function)
+ << SecondModule << Range << DiffType;
+ };
+
+ if (ComputeQualTypeODRHash(FirstFunction->getReturnType()) !=
+ ComputeQualTypeODRHash(SecondFunction->getReturnType())) {
+ ODRDiagError(FirstFunction->getReturnTypeSourceRange().getBegin(),
+ FirstFunction->getReturnTypeSourceRange(), ReturnType)
+ << FirstFunction->getReturnType();
+ ODRDiagNote(SecondFunction->getReturnTypeSourceRange().getBegin(),
+ SecondFunction->getReturnTypeSourceRange(), ReturnType)
+ << SecondFunction->getReturnType();
+ Diagnosed = true;
+ break;
+ }
+
+ assert(FirstFunction->param_size() == SecondFunction->param_size() &&
+ "Merged functions with different number of parameters");
+
+ auto ParamSize = FirstFunction->param_size();
+ bool ParameterMismatch = false;
+ for (unsigned I = 0; I < ParamSize; ++I) {
+ auto *FirstParam = FirstFunction->getParamDecl(I);
+ auto *SecondParam = SecondFunction->getParamDecl(I);
+
+ assert(getContext().hasSameType(FirstParam->getType(),
+ SecondParam->getType()) &&
+ "Merged function has different parameter types.");
+
+ if (FirstParam->getDeclName() != SecondParam->getDeclName()) {
+ ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterName)
+ << I + 1 << FirstParam->getDeclName();
+ ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterName)
+ << I + 1 << SecondParam->getDeclName();
+ ParameterMismatch = true;
+ break;
+ };
+
+ QualType FirstParamType = FirstParam->getType();
+ QualType SecondParamType = SecondParam->getType();
+ if (FirstParamType != SecondParamType &&
+ ComputeQualTypeODRHash(FirstParamType) !=
+ ComputeQualTypeODRHash(SecondParamType)) {
+ if (const DecayedType *ParamDecayedType =
+ FirstParamType->getAs<DecayedType>()) {
+ ODRDiagError(FirstParam->getLocation(),
+ FirstParam->getSourceRange(), ParameterType)
+ << (I + 1) << FirstParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ ODRDiagError(FirstParam->getLocation(),
+ FirstParam->getSourceRange(), ParameterType)
+ << (I + 1) << FirstParamType << false;
+ }
+
+ if (const DecayedType *ParamDecayedType =
+ SecondParamType->getAs<DecayedType>()) {
+ ODRDiagNote(SecondParam->getLocation(),
+ SecondParam->getSourceRange(), ParameterType)
+ << (I + 1) << SecondParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ ODRDiagNote(SecondParam->getLocation(),
+ SecondParam->getSourceRange(), ParameterType)
+ << (I + 1) << SecondParamType << false;
+ }
+ ParameterMismatch = true;
+ break;
+ }
+
+ const Expr *FirstInit = FirstParam->getInit();
+ const Expr *SecondInit = SecondParam->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterSingleDefaultArgument)
+ << (I + 1) << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterSingleDefaultArgument)
+ << (I + 1) << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (FirstInit && SecondInit &&
+ ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
+ ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterDifferentDefaultArgument)
+ << (I + 1) << FirstInit->getSourceRange();
+ ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterDifferentDefaultArgument)
+ << (I + 1) << SecondInit->getSourceRange();
+ ParameterMismatch = true;
+ break;
+ }
+
+ assert(ComputeSubDeclODRHash(FirstParam) ==
+ ComputeSubDeclODRHash(SecondParam) &&
+ "Undiagnosed parameter difference.");
+ }
+
+ if (ParameterMismatch) {
+ Diagnosed = true;
+ break;
+ }
+
+ // If no error has been generated before now, assume the problem is in
+ // the body and generate a message.
+ ODRDiagError(FirstFunction->getLocation(),
+ FirstFunction->getSourceRange(), FunctionBody);
+ ODRDiagNote(SecondFunction->getLocation(),
+ SecondFunction->getSourceRange(), FunctionBody);
+ Diagnosed = true;
+ break;
+ }
+ assert(Diagnosed && "Unable to emit ODR diagnostic.");
+ }
}
void ASTReader::StartedDeserializing() {
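
The new PendingFunctionOdrMergeFailures path compares the ODR hashes of function definitions merged from different modules and reports the first mismatch in return type, parameter name/type, default argument, or body via err_module_odr_violation_function. A hedged sketch of two module headers that would now be diagnosed rather than going unreported (module and file names are hypothetical):

    // first/scale.h, part of module 'First':
    inline int scale(int v, int factor = 2) { return v * factor; }

    // second/scale.h, part of module 'Second' -- same signature, different body,
    // so loading both modules now triggers the function ODR violation diagnostic:
    inline int scale(int v, int factor = 2) { return v + factor; }
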
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index a3bf0d971267..efbaf92a849a 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -796,6 +796,9 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setCachedLinkage(Linkage(Record.readInt()));
FD->EndRangeLoc = ReadSourceLocation();
+ FD->ODRHash = Record.readInt();
+ FD->HasODRHash = true;
+
switch ((FunctionDecl::TemplatedKind)Record.readInt()) {
case FunctionDecl::TK_NonTemplate:
mergeRedeclarable(FD, Redecl);
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 3dac3a48297a..bb72a3b383ea 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -538,6 +538,8 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->getLinkageInternal());
Record.AddSourceLocation(D->getLocEnd());
+ Record.push_back(D->getODRHash());
+
Record.push_back(D->getTemplatedKind());
switch (D->getTemplatedKind()) {
case FunctionDecl::TK_NonTemplate:
@@ -2072,6 +2074,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // LateParsed
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // ODRHash
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // TemplateKind
// This Array slurps the rest of the record. Fortunately we want to encode
// (nearly) all the remaining (variable number of) fields in the same way.
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 23b43759a34b..db9179e018a1 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -22,6 +22,7 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -262,8 +263,19 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) {
// We just finished a base constructor. Now we can use the subclass's
// type when resolving virtual calls.
- const Decl *D = C.getLocationContext()->getDecl();
- recordFixedType(Target, cast<CXXConstructorDecl>(D), C);
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // FIXME: In C++17 classes with non-virtual bases may be treated as
+ // aggregates, and in such case no top-frame constructor will be called.
+ // Figure out if we need to do anything in this case.
+ // FIXME: Instead of relying on the ParentMap, we should have the
+ // trigger-statement (InitListExpr in this case) available in this
+ // callback, ideally as part of CallEvent.
+ if (dyn_cast_or_null<InitListExpr>(
+ LCtx->getParentMap().getParent(Ctor->getOriginExpr())))
+ return;
+
+ recordFixedType(Target, cast<CXXConstructorDecl>(LCtx->getDecl()), C);
}
return;
}
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index a51dda6fe858..6c0c53dd64cb 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -293,7 +293,9 @@ public:
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
Os << "Excessive padding in '";
- Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers()) << "'";
+ Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers(),
+ LangOptions())
+ << "'";
if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
// TODO: make this show up better in the console output and in
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 4a5d25fc5634..dc284888eb03 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -885,8 +885,12 @@ static bool GenerateMinimalPathDiagnostic(
if (NextNode) {
// Add diagnostic pieces from custom visitors.
BugReport *R = PDB.getBugReport();
+ llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
for (auto &V : visitors) {
if (auto p = V->VisitNode(N, NextNode, PDB, *R)) {
+ if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
+ continue;
+
updateStackPiecesWithMessage(*p, CallStack);
PD.getActivePath().push_front(std::move(p));
}
@@ -1584,8 +1588,12 @@ static bool GenerateExtensivePathDiagnostic(
// Add pieces from custom visitors.
BugReport *R = PDB.getBugReport();
+ llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
for (auto &V : visitors) {
if (auto p = V->VisitNode(N, NextNode, PDB, *R)) {
+ if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
+ continue;
+
const PathDiagnosticLocation &Loc = p->getLocation();
EB.addEdge(Loc, true);
updateStackPiecesWithMessage(*p, CallStack);
@@ -1879,8 +1887,12 @@ static bool GenerateAlternateExtensivePathDiagnostic(
continue;
// Add pieces from custom visitors.
+ llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
for (auto &V : visitors) {
if (auto p = V->VisitNode(N, NextNode, PDB, *report)) {
+ if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
+ continue;
+
addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
updateStackPiecesWithMessage(*p, CallStack);
PD.getActivePath().push_front(std::move(p));
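
All three path-diagnostic generators above now drop a visitor piece when an equal piece has already been produced for the same node, using llvm::FoldingSet deduplication. A minimal sketch of that idiom with a hypothetical node type:

    #include "llvm/ADT/FoldingSet.h"

    struct Piece : llvm::FoldingSetNode {
      int Key;
      explicit Piece(int K) : Key(K) {}
      void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Key); }
    };

    // Returns true only for the first piece with a given profile, mirroring the
    // 'GetOrInsertNode(p.get()) != p.get()' checks above.
    bool insertUnique(llvm::FoldingSet<Piece> &Set, Piece *P) {
      return Set.GetOrInsertNode(P) == P;
    }
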
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 7304d789431e..972f4c5f3da2 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -985,12 +985,8 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
if (!S || !N)
return false;
- if (const Expr *Ex = dyn_cast<Expr>(S)) {
- Ex = Ex->IgnoreParenCasts();
- const Expr *PeeledEx = peelOffOuterExpr(Ex, N);
- if (Ex != PeeledEx)
- S = PeeledEx;
- }
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = peelOffOuterExpr(Ex, N);
const Expr *Inner = nullptr;
if (const Expr *Ex = dyn_cast<Expr>(S)) {
@@ -1142,9 +1138,12 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
else
RVal = state->getSVal(L->getRegion());
- const MemRegion *RegionRVal = RVal.getAsRegion();
report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
+ if (Optional<KnownSVal> KV = RVal.getAs<KnownSVal>())
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, L->getRegion(), EnableNullFPSuppression));
+ const MemRegion *RegionRVal = RVal.getAsRegion();
if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
report.markInteresting(RegionRVal);
report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 03e0095d0e83..dad93111966f 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/AST/ParentMap.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
@@ -267,6 +268,23 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
// FALLTHROUGH
case CXXConstructExpr::CK_NonVirtualBase:
+ // In C++17, classes with non-virtual bases may be aggregates, so they would
+ // be initialized as aggregates without a constructor call, so we may have
+ // a base class constructed directly into an initializer list without
+ // having the derived-class constructor call on the previous stack frame.
+ // Initializer lists may be nested into more initializer lists that
+ // correspond to surrounding aggregate initializations.
+ // FIXME: For now this code essentially bails out. We need to find the
+ // correct target region and set it.
+ // FIXME: Instead of relying on the ParentMap, we should have the
+ // trigger-statement (InitListExpr in this case) passed down from CFG or
+ // otherwise always available during construction.
+ if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(CE))) {
+ MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
+ Target = MRMgr.getCXXTempObjectRegion(CE, LCtx);
+ break;
+ }
+ // FALLTHROUGH
case CXXConstructExpr::CK_Delegating: {
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
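
Both this change and the DynamicTypePropagation change earlier in this import handle C++17 aggregate initialization of a class with a non-virtual base, where the base constructor runs directly inside an InitListExpr and no derived-class constructor frame exists. A hedged example of code that reaches this path (compile as C++17; names are illustrative):

    struct Base {
      Base() : b(0) {}   // user-provided base constructor
      int b;
    };

    struct Derived : Base {  // still an aggregate in C++17 (no virtual bases,
      int d;                 // no user-provided constructors of its own)
    };

    Derived makeDerived() {
      // Base() is constructed straight into the initializer list; there is no
      // Derived constructor call on the stack frame above it.
      return Derived{Base(), 42};
    }
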
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 9b820e81e374..ebf1487d4bfc 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -91,6 +91,9 @@ public:
// Rewrite the file specified by FID with HTML formatting.
void RewriteFile(Rewriter &R, const SourceManager& SMgr,
const PathPieces& path, FileID FID);
+
+ /// \return Javascript for navigating the HTML report using j/k keys.
+ std::string generateKeyboardNavigationJavascript();
};
} // end anonymous namespace
@@ -337,6 +340,9 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
int LineNumber = path.back()->getLocation().asLocation().getExpansionLineNumber();
int ColumnNumber = path.back()->getLocation().asLocation().getExpansionColumnNumber();
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
+ generateKeyboardNavigationJavascript());
+
// Add the name of the file as an <h1> tag.
{
std::string s;
@@ -378,8 +384,14 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
}
- os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n"
- "<h3>Annotated Source Code</h3>\n";
+ os << R"<<<(
+</table>
+<!-- REPORTSUMMARYEXTRA -->
+<h3>Annotated Source Code</h3>
+<p><span class='macro'>[?]
+ <span class='expansion'>Use j/k keys for keyboard navigation</span>
+</span></p>
+)<<<";
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
}
@@ -777,3 +789,82 @@ void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
}
+
+std::string HTMLDiagnostics::generateKeyboardNavigationJavascript() {
+ return R"<<<(
+<script type='text/javascript'>
+var digitMatcher = new RegExp("[0-9]+");
+
+document.addEventListener("DOMContentLoaded", function() {
+ document.querySelectorAll(".PathNav > a").forEach(
+ function(currentValue, currentIndex) {
+ var hrefValue = currentValue.getAttribute("href");
+ currentValue.onclick = function() {
+ scrollTo(document.querySelector(hrefValue));
+ return false;
+ };
+ });
+});
+
+var findNum = function() {
+ var s = document.querySelector(".selected");
+ if (!s || s.id == "EndPath") {
+ return 0;
+ }
+ var out = parseInt(digitMatcher.exec(s.id)[0]);
+ return out;
+};
+
+var scrollTo = function(el) {
+ document.querySelectorAll(".selected").forEach(function(s) {
+ s.classList.remove("selected");
+ });
+ el.classList.add("selected");
+ window.scrollBy(0, el.getBoundingClientRect().top -
+ (window.innerHeight / 2));
+}
+
+var move = function(num, up, numItems) {
+ if (num == 1 && up || num == numItems - 1 && !up) {
+ return 0;
+ } else if (num == 0 && up) {
+ return numItems - 1;
+ } else if (num == 0 && !up) {
+ return 1 % numItems;
+ }
+ return up ? num - 1 : num + 1;
+}
+
+var numToId = function(num) {
+ if (num == 0) {
+ return document.getElementById("EndPath")
+ }
+ return document.getElementById("Path" + num);
+};
+
+var navigateTo = function(up) {
+ var numItems = document.querySelectorAll(".line > .msg").length;
+ var currentSelected = findNum();
+ var newSelected = move(currentSelected, up, numItems);
+ var newEl = numToId(newSelected, numItems);
+
+ // Scroll element into center.
+ scrollTo(newEl);
+};
+
+window.addEventListener("keydown", function (event) {
+ if (event.defaultPrevented) {
+ return;
+ }
+ if (event.key == "j") {
+ navigateTo(/*up=*/false);
+ } else if (event.key == "k") {
+ navigateTo(/*up=*/true);
+ } else {
+ return;
+ }
+ event.preventDefault();
+}, true);
+</script>
+ )<<<";
+}
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 7f2a481c6b0d..e2e69bb28ec2 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -2132,9 +2132,10 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
}
- // If the init list is shorter than the array length, set the
- // array default value.
- if (Size.hasValue() && i < Size.getValue())
+ // If the init list is shorter than the array length (or the array has
+ // variable length), set the array default value. Values that are already set
+ // are not overwritten.
+ if (!Size.hasValue() || i < Size.getValue())
NewB = setImplicitDefaultValue(NewB, R, ElementTy);
return NewB;
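
The relaxed condition also applies the default binding when the analyzer cannot evaluate the array extent to a constant, in addition to the existing short-initializer case. A minimal illustration of the short-initializer case that the comment describes:

    // Elements beyond the initializer are covered by the array's default (zero)
    // binding rather than being treated as unbound.
    int tail() {
      int a[4] = {1, 2};
      return a[3];   // modeled as 0
    }
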