Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 10
-rw-r--r--  lib/AST/Expr.cpp | 25
-rw-r--r--  lib/AST/ExprCXX.cpp | 122
-rw-r--r--  lib/AST/ExprObjC.cpp | 43
-rw-r--r--  lib/AST/MicrosoftMangle.cpp | 21
-rw-r--r--  lib/AST/OpenMPClause.cpp | 96
-rw-r--r--  lib/Basic/Builtins.cpp | 14
-rw-r--r--  lib/Basic/LangOptions.cpp | 8
-rw-r--r--  lib/Basic/Targets.cpp | 11
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 7
-rw-r--r--  lib/CodeGen/CGCall.cpp | 16
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 6
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp | 757
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.h | 219
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 116
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 4
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 10
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 15
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 3
-rw-r--r--  lib/CodeGen/CodeGenPGO.cpp | 10
-rw-r--r--  lib/CodeGen/CoverageMappingGen.cpp | 34
-rw-r--r--  lib/Driver/Driver.cpp | 4
-rw-r--r--  lib/Driver/ToolChains.cpp | 3
-rw-r--r--  lib/Driver/Tools.cpp | 62
-rw-r--r--  lib/Driver/Tools.h | 4
-rw-r--r--  lib/Format/ContinuationIndenter.cpp | 41
-rw-r--r--  lib/Format/FormatToken.cpp | 10
-rw-r--r--  lib/Format/FormatToken.h | 6
-rw-r--r--  lib/Format/TokenAnnotator.cpp | 46
-rw-r--r--  lib/Frontend/CodeGenOptions.cpp | 8
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 72
-rw-r--r--  lib/Headers/CMakeLists.txt | 1
-rw-r--r--  lib/Headers/ia32intrin.h | 32
-rw-r--r--  lib/Headers/immintrin.h | 2
-rw-r--r--  lib/Headers/pkuintrin.h | 48
-rw-r--r--  lib/Parse/ParseExpr.cpp | 17
-rw-r--r--  lib/Parse/ParseStmtAsm.cpp | 5
-rw-r--r--  lib/Sema/SemaChecking.cpp | 2
-rw-r--r--  lib/Sema/SemaDecl.cpp | 47
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 24
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 6
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 7
-rw-r--r--  lib/Sema/SemaOverload.cpp | 56
-rw-r--r--  lib/Sema/SemaStmtAsm.cpp | 22
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 32
-rw-r--r--  lib/Serialization/ASTReader.cpp | 7
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 15
-rw-r--r--  lib/Serialization/ASTWriter.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 55
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 10
51 files changed, 1716 insertions, 492 deletions
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 108677abb8a4..d4abbe47cafb 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -3658,14 +3658,13 @@ static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
}
-static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
- unsigned NumProtocols) {
- if (NumProtocols == 0) return true;
+static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
+ if (Protocols.empty()) return true;
if (Protocols[0]->getCanonicalDecl() != Protocols[0])
return false;
- for (unsigned i = 1; i != NumProtocols; ++i)
+ for (unsigned i = 1; i != Protocols.size(); ++i)
if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
Protocols[i]->getCanonicalDecl() != Protocols[i])
return false;
@@ -3730,8 +3729,7 @@ QualType ASTContext::getObjCObjectType(
[&](QualType type) {
return type.isCanonical();
});
- bool protocolsSorted = areSortedAndUniqued(protocols.data(),
- protocols.size());
+ bool protocolsSorted = areSortedAndUniqued(protocols);
if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
// Determine the canonical type arguments.
ArrayRef<QualType> canonTypeArgs;
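A note on the hunk above: passing llvm::ArrayRef instead of a pointer/length pair keeps the bounds with the data, so callers can hand over a SmallVector, std::vector, or plain C array without a separate count argument. A minimal sketch of the same sorted-and-unique check with hypothetical names (not code from this commit):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

static bool isSortedAndUnique(llvm::ArrayRef<int> Values) {
  for (size_t I = 1, E = Values.size(); I != E; ++I)
    if (Values[I - 1] >= Values[I]) // equal neighbours mean a duplicate
      return false;
  return true;
}

// Usage: both containers convert implicitly to ArrayRef<int>.
//   llvm::SmallVector<int, 4> SV{1, 2, 3};  isSortedAndUnique(SV); // true
//   int A[] = {1, 1, 2};                    isSortedAndUnique(A);  // false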
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index bdd7a45f0850..f9757b209294 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -3711,8 +3711,7 @@ DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
ArrayRef<Expr*> IndexExprs,
SourceLocation ColonOrEqualLoc,
bool UsesColonSyntax, Expr *Init) {
- void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
- sizeof(Stmt *) * (IndexExprs.size() + 1),
+ void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(IndexExprs.size() + 1),
llvm::alignOf<DesignatedInitExpr>());
return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
ColonOrEqualLoc, UsesColonSyntax,
@@ -3721,8 +3720,8 @@ DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
unsigned NumIndexExprs) {
- void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
- sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(NumIndexExprs + 1),
+ llvm::alignOf<DesignatedInitExpr>());
return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
}
@@ -3764,22 +3763,19 @@ SourceLocation DesignatedInitExpr::getLocEnd() const {
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
- Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
- return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+ return getSubExpr(D.ArrayOrRange.Index + 1);
}
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
assert(D.Kind == Designator::ArrayRangeDesignator &&
"Requires array range designator");
- Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
- return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+ return getSubExpr(D.ArrayOrRange.Index + 1);
}
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
assert(D.Kind == Designator::ArrayRangeDesignator &&
"Requires array range designator");
- Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
- return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
+ return getSubExpr(D.ArrayOrRange.Index + 2);
}
/// \brief Replaces the designator at index @p Idx with the series
@@ -3863,9 +3859,9 @@ const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context,
EmptyShell sh,
unsigned numSemanticExprs) {
- void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
- (1 + numSemanticExprs) * sizeof(Expr*),
- llvm::alignOf<PseudoObjectExpr>());
+ void *buffer =
+ Context.Allocate(totalSizeToAlloc<Expr *>(1 + numSemanticExprs),
+ llvm::alignOf<PseudoObjectExpr>());
return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
}
@@ -3892,8 +3888,7 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
}
- void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
- (1 + semantics.size()) * sizeof(Expr*),
+ void *buffer = C.Allocate(totalSizeToAlloc<Expr *>(semantics.size() + 1),
llvm::alignOf<PseudoObjectExpr>());
return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
resultIndex);
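The Expr.cpp hunks above replace hand-written "sizeof(Node) + N * sizeof(T)" arithmetic and reinterpret_casts past the object with llvm::TrailingObjects, whose totalSizeToAlloc and getTrailingObjects do the same bookkeeping. A minimal sketch of the idiom with a hypothetical node type (assumes LLVM's TrailingObjects.h; deallocation and error handling omitted):

#include "llvm/Support/TrailingObjects.h"
#include <new>

class NodeWithArgs final
    : private llvm::TrailingObjects<NodeWithArgs, int *> {
  friend TrailingObjects; // lets the base class query this type
  unsigned NumArgs;
  explicit NodeWithArgs(unsigned N) : NumArgs(N) {}

public:
  static NodeWithArgs *create(unsigned N) {
    // Header plus N trailing int* slots, correctly aligned.
    void *Mem = ::operator new(totalSizeToAlloc<int *>(N));
    return new (Mem) NodeWithArgs(N);
  }
  int **args() { return getTrailingObjects<int *>(); }
  unsigned size() const { return NumArgs; }
};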
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 4bb4b5073c4f..d35efcb101b5 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -766,7 +766,7 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
CXXDefaultArgExpr *
CXXDefaultArgExpr::Create(const ASTContext &C, SourceLocation Loc,
ParmVarDecl *Param, Expr *SubExpr) {
- void *Mem = C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(1));
return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
SubExpr);
}
@@ -924,29 +924,22 @@ LambdaCaptureKind LambdaCapture::getCaptureKind() const {
return CapByCopy ? LCK_ByCopy : LCK_ByRef;
}
-LambdaExpr::LambdaExpr(QualType T,
- SourceRange IntroducerRange,
+LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
- ArrayRef<Capture> Captures,
- bool ExplicitParams,
- bool ExplicitResultType,
- ArrayRef<Expr *> CaptureInits,
+ ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
+ bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack)
- : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
- T->isDependentType(), T->isDependentType(), T->isDependentType(),
- ContainsUnexpandedParameterPack),
- IntroducerRange(IntroducerRange),
- CaptureDefaultLoc(CaptureDefaultLoc),
- NumCaptures(Captures.size()),
- CaptureDefault(CaptureDefault),
- ExplicitParams(ExplicitParams),
- ExplicitResultType(ExplicitResultType),
- ClosingBrace(ClosingBrace)
-{
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isDependentType(),
+ ContainsUnexpandedParameterPack),
+ IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
+ NumCaptures(Captures.size()), CaptureDefault(CaptureDefault),
+ ExplicitParams(ExplicitParams), ExplicitResultType(ExplicitResultType),
+ ClosingBrace(ClosingBrace) {
assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
CXXRecordDecl *Class = getLambdaClass();
CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
@@ -957,8 +950,9 @@ LambdaExpr::LambdaExpr(QualType T,
const ASTContext &Context = Class->getASTContext();
Data.NumCaptures = NumCaptures;
Data.NumExplicitCaptures = 0;
- Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
- Capture *ToCapture = Data.Captures;
+ Data.Captures =
+ (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * NumCaptures);
+ LambdaCapture *ToCapture = Data.Captures;
for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
if (Captures[I].isExplicit())
++Data.NumExplicitCaptures;
@@ -986,30 +980,20 @@ LambdaExpr::LambdaExpr(QualType T,
}
}
-LambdaExpr *LambdaExpr::Create(const ASTContext &Context,
- CXXRecordDecl *Class,
- SourceRange IntroducerRange,
- LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- ArrayRef<Capture> Captures,
- bool ExplicitParams,
- bool ExplicitResultType,
- ArrayRef<Expr *> CaptureInits,
- ArrayRef<VarDecl *> ArrayIndexVars,
- ArrayRef<unsigned> ArrayIndexStarts,
- SourceLocation ClosingBrace,
- bool ContainsUnexpandedParameterPack) {
+LambdaExpr *LambdaExpr::Create(
+ const ASTContext &Context, CXXRecordDecl *Class,
+ SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
+ bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars, ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
QualType T = Context.getTypeDeclType(Class);
- unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
- if (!ArrayIndexVars.empty()) {
- Size += sizeof(unsigned) * (Captures.size() + 1);
- // Realign for following VarDecl array.
- Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>());
- Size += sizeof(VarDecl *) * ArrayIndexVars.size();
- }
+ unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
+ Captures.size() + 1, ArrayIndexVars.empty() ? 0 : Captures.size() + 1,
+ ArrayIndexVars.size());
void *Mem = Context.Allocate(Size);
return new (Mem) LambdaExpr(T, IntroducerRange,
CaptureDefault, CaptureDefaultLoc, Captures,
@@ -1021,10 +1005,9 @@ LambdaExpr *LambdaExpr::Create(const ASTContext &Context,
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
unsigned NumCaptures,
unsigned NumArrayIndexVars) {
- unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
- if (NumArrayIndexVars)
- Size += sizeof(VarDecl) * NumArrayIndexVars
- + sizeof(unsigned) * (NumCaptures + 1);
+ unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
+ NumCaptures + 1, NumArrayIndexVars ? NumCaptures + 1 : 0,
+ NumArrayIndexVars);
void *Mem = C.Allocate(Size);
return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
}
@@ -1108,7 +1091,7 @@ CompoundStmt *LambdaExpr::getBody() const {
*const_cast<clang::Stmt **>(&getStoredStmts()[NumCaptures]) =
getCallOperator()->getBody();
- return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+ return static_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
}
bool LambdaExpr::isMutable() const {
@@ -1125,14 +1108,13 @@ ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
SubExpr(subexpr) {
ExprWithCleanupsBits.NumObjects = objects.size();
for (unsigned i = 0, e = objects.size(); i != e; ++i)
- getObjectsBuffer()[i] = objects[i];
+ getTrailingObjects<CleanupObject>()[i] = objects[i];
}
ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
ArrayRef<CleanupObject> objects) {
- size_t size = sizeof(ExprWithCleanups)
- + objects.size() * sizeof(CleanupObject);
- void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(objects.size()),
+ llvm::alignOf<ExprWithCleanups>());
return new (buffer) ExprWithCleanups(subexpr, objects);
}
@@ -1144,8 +1126,8 @@ ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
EmptyShell empty,
unsigned numObjects) {
- size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
- void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(numObjects),
+ llvm::alignOf<ExprWithCleanups>());
return new (buffer) ExprWithCleanups(empty, numObjects);
}
@@ -1165,7 +1147,7 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
LParenLoc(LParenLoc),
RParenLoc(RParenLoc),
NumArgs(Args.size()) {
- Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
+ Expr **StoredArgs = getTrailingObjects<Expr *>();
for (unsigned I = 0; I != Args.size(); ++I) {
if (Args[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -1180,16 +1162,14 @@ CXXUnresolvedConstructExpr::Create(const ASTContext &C,
SourceLocation LParenLoc,
ArrayRef<Expr*> Args,
SourceLocation RParenLoc) {
- void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
- sizeof(Expr *) * Args.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
}
CXXUnresolvedConstructExpr *
CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) {
Stmt::EmptyShell Empty;
- void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
- sizeof(Expr *) * NumArgs);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumArgs));
return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
}
@@ -1404,16 +1384,16 @@ SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc,
SourceLocation RParenLoc,
Optional<unsigned> Length,
ArrayRef<TemplateArgument> PartialArgs) {
- void *Storage = Context.Allocate(
- sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * PartialArgs.size());
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<TemplateArgument>(PartialArgs.size()));
return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack,
PackLoc, RParenLoc, Length, PartialArgs);
}
SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
unsigned NumPartialArgs) {
- void *Storage = Context.Allocate(
- sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * NumPartialArgs);
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<TemplateArgument>(NumPartialArgs));
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
}
@@ -1440,24 +1420,22 @@ FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
if (Params)
std::uninitialized_copy(Params, Params + NumParams,
- reinterpret_cast<ParmVarDecl **>(this + 1));
+ getTrailingObjects<ParmVarDecl *>());
}
FunctionParmPackExpr *
FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
ParmVarDecl *ParamPack, SourceLocation NameLoc,
ArrayRef<ParmVarDecl *> Params) {
- return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
- sizeof(ParmVarDecl*) * Params.size()))
- FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
+ return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(Params.size())))
+ FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
}
FunctionParmPackExpr *
FunctionParmPackExpr::CreateEmpty(const ASTContext &Context,
unsigned NumParams) {
- return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
- sizeof(ParmVarDecl*) * NumParams))
- FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
+ return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(NumParams)))
+ FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
}
void MaterializeTemporaryExpr::setExtendingDecl(const ValueDecl *ExtendedBy,
@@ -1494,8 +1472,8 @@ TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
TypeTraitExprBits.Value = Value;
TypeTraitExprBits.NumArgs = Args.size();
- TypeSourceInfo **ToArgs = getTypeSourceInfos();
-
+ TypeSourceInfo **ToArgs = getTrailingObjects<TypeSourceInfo *>();
+
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
if (Args[I]->getType()->isDependentType())
setValueDependent(true);
@@ -1514,15 +1492,13 @@ TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc,
bool Value) {
- unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
- void *Mem = C.Allocate(Size);
+ void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(Args.size()));
return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
}
TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
unsigned NumArgs) {
- unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
- void *Mem = C.Allocate(Size);
+ void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(NumArgs));
return new (Mem) TypeTraitExpr(EmptyShell());
}
diff --git a/lib/AST/ExprObjC.cpp b/lib/AST/ExprObjC.cpp
index 46298c7a730b..0936a81a597a 100644
--- a/lib/AST/ExprObjC.cpp
+++ b/lib/AST/ExprObjC.cpp
@@ -39,16 +39,14 @@ ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
ArrayRef<Expr *> Elements,
QualType T, ObjCMethodDecl *Method,
SourceRange SR) {
- void *Mem =
- C.Allocate(sizeof(ObjCArrayLiteral) + Elements.size() * sizeof(Expr *));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Elements.size()));
return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
}
ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
unsigned NumElements) {
- void *Mem =
- C.Allocate(sizeof(ObjCArrayLiteral) + NumElements * sizeof(Expr *));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumElements));
return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
}
@@ -60,8 +58,9 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
false, false),
NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
DictWithObjectsMethod(method) {
- KeyValuePair *KeyValues = getKeyValues();
- ExpansionData *Expansions = getExpansionData();
+ KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
+ ExpansionData *Expansions =
+ HasPackExpansions ? getTrailingObjects<ExpansionData>() : nullptr;
for (unsigned I = 0; I < NumElements; I++) {
if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
@@ -91,23 +90,16 @@ ObjCDictionaryLiteral::Create(const ASTContext &C,
ArrayRef<ObjCDictionaryElement> VK,
bool HasPackExpansions, QualType T,
ObjCMethodDecl *method, SourceRange SR) {
- unsigned ExpansionsSize = 0;
- if (HasPackExpansions)
- ExpansionsSize = sizeof(ExpansionData) * VK.size();
-
- void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
- sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
+ void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
+ VK.size(), HasPackExpansions ? VK.size() : 0));
return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
}
ObjCDictionaryLiteral *
ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
bool HasPackExpansions) {
- unsigned ExpansionsSize = 0;
- if (HasPackExpansions)
- ExpansionsSize = sizeof(ExpansionData) * NumElements;
- void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
- sizeof(KeyValuePair) * NumElements + ExpansionsSize);
+ void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
+ NumElements, HasPackExpansions ? NumElements : 0));
return new (Mem)
ObjCDictionaryLiteral(EmptyShell(), NumElements, HasPackExpansions);
}
@@ -122,15 +114,6 @@ QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
return getBase()->getType();
}
-ObjCSubscriptRefExpr *
-ObjCSubscriptRefExpr::Create(const ASTContext &C, Expr *base, Expr *key,
- QualType T, ObjCMethodDecl *getMethod,
- ObjCMethodDecl *setMethod, SourceLocation RB) {
- void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
- return new (Mem) ObjCSubscriptRefExpr(
- base, key, T, VK_LValue, OK_ObjCSubscript, getMethod, setMethod, RB);
-}
-
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
SourceLocation SuperLoc, bool IsInstanceSuper,
@@ -293,11 +276,9 @@ ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C, unsigned NumArgs,
unsigned NumStoredSelLocs) {
- unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
- NumArgs * sizeof(Expr *) +
- NumStoredSelLocs * sizeof(SourceLocation);
return (ObjCMessageExpr *)C.Allocate(
- Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
+ totalSizeToAlloc<void *, SourceLocation>(NumArgs + 1, NumStoredSelLocs),
+ llvm::AlignOf<ObjCMessageExpr>::Alignment);
}
void ObjCMessageExpr::getSelectorLocs(
@@ -358,7 +339,7 @@ ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
Stmt::child_range ObjCMessageExpr::children() {
Stmt **begin;
if (getReceiverKind() == Instance)
- begin = reinterpret_cast<Stmt **>(this + 1);
+ begin = reinterpret_cast<Stmt **>(getTrailingObjects<void *>());
else
begin = reinterpret_cast<Stmt **>(getArgs());
return child_range(begin,
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 136a43b640f7..d45232b6deb0 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -127,8 +127,6 @@ public:
CXXCtorType CT, uint32_t Size, uint32_t NVOffset,
int32_t VBPtrOffset, uint32_t VBIndex,
raw_ostream &Out) override;
- void mangleCXXCatchHandlerType(QualType T, uint32_t Flags,
- raw_ostream &Out) override;
void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
@@ -221,7 +219,7 @@ class MicrosoftCXXNameMangler {
typedef llvm::SmallVector<std::string, 10> BackRefVec;
BackRefVec NameBackReferences;
- typedef llvm::DenseMap<void *, unsigned> ArgBackRefMap;
+ typedef llvm::DenseMap<const void *, unsigned> ArgBackRefMap;
ArgBackRefMap TypeBackReferences;
typedef std::set<int> PassObjectSizeArgsSet;
@@ -1489,7 +1487,7 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
int Type = POSA->getType();
auto Iter = PassObjectSizeArgs.insert(Type).first;
- void *TypePtr = (void *)&*Iter;
+ auto *TypePtr = (const void *)&*Iter;
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
if (Found == TypeBackReferences.end()) {
@@ -2215,7 +2213,8 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
- Out << (Quals.hasVolatile() ? 'B' : 'A');
+ assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
+ Out << 'A';
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
}
@@ -2226,7 +2225,8 @@ void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
- Out << (Quals.hasVolatile() ? "$$R" : "$$Q");
+ assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
+ Out << "$$Q";
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
}
@@ -2620,15 +2620,6 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
}
-void MicrosoftMangleContextImpl::mangleCXXCatchHandlerType(QualType T,
- uint32_t Flags,
- raw_ostream &Out) {
- MicrosoftCXXNameMangler Mangler(*this, Out);
- Mangler.getStream() << "llvm.eh.handlertype.";
- Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
- Mangler.getStream() << '.' << Flags;
-}
-
void MicrosoftMangleContextImpl::mangleCXXVirtualDisplacementMap(
const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index cd60d3727ba1..1ef43f7694cf 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -40,9 +40,7 @@ OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
// Allocate space for private variables and initializer expressions.
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
- llvm::alignOf<Expr *>()) +
- 2 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * VL.size()));
OMPPrivateClause *Clause =
new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -52,9 +50,7 @@ OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
- llvm::alignOf<Expr *>()) +
- 2 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * N));
return new (Mem) OMPPrivateClause(N);
}
@@ -75,9 +71,7 @@ OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
- llvm::alignOf<Expr *>()) +
- 3 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * VL.size()));
OMPFirstprivateClause *Clause =
new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -88,9 +82,7 @@ OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
- llvm::alignOf<Expr *>()) +
- 3 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * N));
return new (Mem) OMPFirstprivateClause(N);
}
@@ -126,9 +118,7 @@ OMPLastprivateClause *OMPLastprivateClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
OMPLastprivateClause *Clause =
new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -140,9 +130,7 @@ OMPLastprivateClause *OMPLastprivateClause::Create(
OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
return new (Mem) OMPLastprivateClause(N);
}
@@ -151,9 +139,7 @@ OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
OMPSharedClause *Clause =
new (Mem) OMPSharedClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -161,9 +147,7 @@ OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
}
OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
return new (Mem) OMPSharedClause(N);
}
@@ -198,9 +182,7 @@ OMPLinearClause *OMPLinearClause::Create(
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
// (Step and CalcStep).
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
- llvm::alignOf<Expr *>()) +
- (5 * VL.size() + 2) * sizeof(Expr *));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size() + 2));
OMPLinearClause *Clause = new (Mem) OMPLinearClause(
StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -221,9 +203,7 @@ OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
unsigned NumVars) {
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
// (Step and CalcStep).
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
- llvm::alignOf<Expr *>()) +
- (5 * NumVars + 2) * sizeof(Expr *));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * NumVars + 2));
return new (Mem) OMPLinearClause(NumVars);
}
@@ -231,9 +211,7 @@ OMPAlignedClause *
OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * (VL.size() + 1));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + 1));
OMPAlignedClause *Clause = new (Mem)
OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -243,9 +221,7 @@ OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
unsigned NumVars) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * (NumVars + 1));
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumVars + 1));
return new (Mem) OMPAlignedClause(NumVars);
}
@@ -275,9 +251,7 @@ OMPCopyinClause *OMPCopyinClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
OMPCopyinClause *Clause =
new (Mem) OMPCopyinClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -288,9 +262,7 @@ OMPCopyinClause *OMPCopyinClause::Create(
}
OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
return new (Mem) OMPCopyinClause(N);
}
@@ -320,9 +292,7 @@ OMPCopyprivateClause *OMPCopyprivateClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
OMPCopyprivateClause *Clause =
new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -334,9 +304,7 @@ OMPCopyprivateClause *OMPCopyprivateClause::Create(
OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
return new (Mem) OMPCopyprivateClause(N);
}
@@ -373,9 +341,7 @@ OMPReductionClause *OMPReductionClause::Create(
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
OMPReductionClause *Clause = new (Mem) OMPReductionClause(
StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
Clause->setVarRefs(VL);
@@ -388,9 +354,7 @@ OMPReductionClause *OMPReductionClause::Create(
OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
return new (Mem) OMPReductionClause(N);
}
@@ -399,9 +363,7 @@ OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
OMPFlushClause *Clause =
new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -409,9 +371,7 @@ OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
}
OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
return new (Mem) OMPFlushClause(N);
}
@@ -420,9 +380,7 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
OMPDependClause *Clause =
new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -433,9 +391,7 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
}
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
return new (Mem) OMPDependClause(N);
}
@@ -445,9 +401,7 @@ OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
OpenMPMapClauseKind TypeModifier,
OpenMPMapClauseKind Type,
SourceLocation TypeLoc) {
- void *Mem = C.Allocate(
- llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
OMPMapClause *Clause = new (Mem) OMPMapClause(
TypeModifier, Type, TypeLoc, StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -458,8 +412,6 @@ OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
}
OMPMapClause *OMPMapClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(
- llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
return new (Mem) OMPMapClause(N);
}
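The OpenMP clause nodes above pack several equally sized expression lists into one trailing Expr* buffer, which is why the element counts are 2*, 3*, 4*, or 5* the variable-list size. A hypothetical helper sketching how such a buffer is sliced into per-list ranges (list K of length N starts at offset K * N):

#include "llvm/ADT/ArrayRef.h"

static llvm::MutableArrayRef<int *> getList(int **Buffer, unsigned N,
                                            unsigned K) {
  return llvm::MutableArrayRef<int *>(Buffer + K * N, N);
}

// e.g. a clause storing five lists would address them as K = 0 ... 4; the
// exact ordering is defined by each clause's accessors, not by this sketch.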
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index 69b10c13ede1..fb6a6451aa82 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -48,10 +48,20 @@ void Builtin::Context::InitializeTarget(const TargetInfo &Target,
AuxTSRecords = AuxTarget->getTargetBuiltins();
}
+bool Builtin::Context::isBuiltinFunc(const char *Name) {
+ StringRef FuncName(Name);
+ for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin; ++i)
+ if (FuncName.equals(BuiltinInfo[i].Name))
+ return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;
+
+ return false;
+}
+
bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
const LangOptions &LangOpts) {
- bool BuiltinsUnsupported = LangOpts.NoBuiltin &&
- strchr(BuiltinInfo.Attributes, 'f');
+ bool BuiltinsUnsupported =
+ (LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
+ strchr(BuiltinInfo.Attributes, 'f');
bool MathBuiltinsUnsupported =
LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index 2c87845b7b0e..1b08b0686046 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/StringRef.h"
using namespace clang;
@@ -36,3 +37,10 @@ void LangOptions::resetNonModularOptions() {
ImplementationOfModule.clear();
}
+bool LangOptions::isNoBuiltinFunc(const char *Name) const {
+ StringRef FuncName(Name);
+ for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
+ if (FuncName.equals(NoBuiltinFuncs[i]))
+ return true;
+ return false;
+}
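LangOptions::isNoBuiltinFunc above is a plain linear scan of the per-function no-builtin list, and Builtins.cpp consults it so that only the named builtin loses its 'f' (library builtin) treatment. A standalone sketch of the same membership test, assuming the list is populated from a per-function driver flag spelled -fno-builtin-<name> (the Driver/Frontend hunks of this commit are not shown above):

#include <algorithm>
#include <string>
#include <vector>

static bool isNoBuiltinFunc(const std::vector<std::string> &NoBuiltinFuncs,
                            const char *Name) {
  return std::find(NoBuiltinFuncs.begin(), NoBuiltinFuncs.end(),
                   std::string(Name)) != NoBuiltinFuncs.end();
}

// With NoBuiltinFuncs = {"memcpy"}, isNoBuiltinFunc(..., "memcpy") is true,
// so that one function is treated as unsupported while all other builtins
// keep their normal handling.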
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 893bd7c49815..9ce525709cf4 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -2095,6 +2095,7 @@ class X86TargetInfo : public TargetInfo {
bool HasXSAVEOPT = false;
bool HasXSAVEC = false;
bool HasXSAVES = false;
+ bool HasPKU = false;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@@ -2596,6 +2597,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512vl", true);
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
+ setFeatureEnabledImpl(Features, "pku", true);
// FALLTHROUGH
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
@@ -3021,6 +3023,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasXSAVEC = true;
} else if (Feature == "+xsaves") {
HasXSAVES = true;
+ } else if (Feature == "+pku") {
+ HasPKU = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -3322,7 +3326,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__XSAVEC__");
if (HasXSAVES)
Builder.defineMacro("__XSAVES__");
-
+ if (HasPKU)
+ Builder.defineMacro("__PKU__");
if (HasCX16)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
@@ -3440,6 +3445,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("xsavec", HasXSAVEC)
.Case("xsaves", HasXSAVES)
.Case("xsaveopt", HasXSAVEOPT)
+ .Case("pku", HasPKU)
.Default(false);
}
@@ -5310,7 +5316,8 @@ public:
bool setCPU(const std::string &Name) override {
bool CPUKnown = llvm::StringSwitch<bool>(Name)
.Case("generic", true)
- .Cases("cortex-a53", "cortex-a57", "cortex-a72", "cortex-a35", true)
+ .Cases("cortex-a53", "cortex-a57", "cortex-a72",
+ "cortex-a35", "exynos-m1", true)
.Case("cyclone", true)
.Default(false);
return CPUKnown;
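With the Targets.cpp change above, enabling the pku feature defines __PKU__ (and one CPU case enables it by default). A user-side sketch of guarding code on that macro; the intrinsic name is an assumption based on the pkuintrin.h header this commit adds, which is not shown in the hunks above:

#include <immintrin.h>

static unsigned readPKRUOrZero(void) {
#ifdef __PKU__
  return _rdpkru_u32(); // protection-key register; needs -mpku or a CPU implying it
#else
  return 0;             // protection keys not available for this target
#endif
}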
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 82297e7ee417..7032d00386f3 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -249,6 +249,13 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
if (!CodeGenOpts.SimplifyLibCalls)
TLII->disableAllFunctions();
+ else {
+ // Disable individual libc/libm calls in TargetLibraryInfo.
+ LibFunc::Func F;
+ for (auto &FuncName : CodeGenOpts.getNoBuiltinFuncs())
+ if (TLII->getLibFunc(FuncName, F))
+ TLII->setUnavailable(F);
+ }
switch (CodeGenOpts.getVecLib()) {
case CodeGenOptions::Accelerate:
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 49b5df0c4f06..935985049c01 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1431,11 +1431,9 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
-void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
- CGCalleeInfo CalleeInfo,
- AttributeListType &PAL,
- unsigned &CallingConv,
- bool AttrOnCallSite) {
+void CodeGenModule::ConstructAttributeList(
+ StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
+ AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
bool HasOptnone = false;
@@ -1510,7 +1508,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (AttrOnCallSite) {
// Attributes that should go on the call site only.
- if (!CodeGenOpts.SimplifyLibCalls)
+ if (!CodeGenOpts.SimplifyLibCalls ||
+ CodeGenOpts.isNoBuiltinFunc(Name.data()))
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
if (!CodeGenOpts.TrapFuncName.empty())
FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
@@ -3490,8 +3489,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(CallInfo, CalleeInfo, AttributeList, CallingConv,
- true);
+ CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
+ AttributeList, CallingConv,
+ /*AttrOnCallSite=*/true);
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
AttributeList);
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 5f3ebbd75765..e30b2875f209 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -4770,11 +4770,7 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// This isn't a stable sort, but our algorithm should handle it fine.
llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
} else {
-#ifndef NDEBUG
- for (unsigned i = 1; i != IvarsInfo.size(); ++i) {
- assert(IvarsInfo[i - 1].Offset <= IvarsInfo[i].Offset);
- }
-#endif
+ assert(std::is_sorted(IvarsInfo.begin(), IvarsInfo.end()));
}
assert(IvarsInfo.back().Offset < InstanceEnd);
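The CGObjCMac.cpp hunk above folds an #ifndef NDEBUG verification loop into a single assert(std::is_sorted(...)), which compiles away entirely in release builds. A minimal sketch of the same idea on a plain container (hypothetical helper):

#include <algorithm>
#include <cassert>
#include <vector>

static void checkOffsetsSorted(const std::vector<unsigned> &Offsets) {
  (void)Offsets; // silence unused-parameter warnings when NDEBUG is defined
  assert(std::is_sorted(Offsets.begin(), Offsets.end()) &&
         "ivar offsets must be non-decreasing");
}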
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 0ba7e0639acc..6d4fc9f64b47 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -11,16 +11,19 @@
//
//===----------------------------------------------------------------------===//
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
-#include "CGCleanup.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -215,25 +218,31 @@ private:
/// \brief API for captured statement code generation in OpenMP target
/// constructs. For this captures, implicit parameters are used instead of the
-/// captured fields.
+/// captured fields. The name of the target region has to be unique in a given
+/// application so it is provided by the client, because only the client has
+/// the information to generate that.
class CGOpenMPTargetRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
- const RegionCodeGenTy &CodeGen)
+ const RegionCodeGenTy &CodeGen, StringRef HelperName)
: CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
- /*HasCancel = */ false) {}
+ /*HasCancel=*/false),
+ HelperName(HelperName) {}
/// \brief This is unused for target regions because each starts executing
/// with a single thread.
const VarDecl *getThreadIDVariable() const override { return nullptr; }
/// \brief Get the name of the capture helper.
- StringRef getHelperName() const override { return ".omp_offloading."; }
+ StringRef getHelperName() const override { return HelperName; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
}
+
+private:
+ StringRef HelperName;
};
/// \brief RAII for emitting code of OpenMP constructs.
@@ -301,7 +310,8 @@ LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
}
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
- : CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr) {
+ : CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr),
+ OffloadEntriesInfoManager(CGM) {
IdentTy = llvm::StructType::create(
"ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
@@ -311,6 +321,8 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
llvm::PointerType::getUnqual(CGM.Int32Ty)};
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+
+ loadOffloadInfoMetadata();
}
void CGOpenMPRuntime::clear() {
@@ -931,6 +943,26 @@ CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
break;
}
+ case OMPRTL__tgt_register_lib: {
+ // Build void __tgt_register_lib(__tgt_bin_desc *desc);
+ QualType ParamTy =
+ CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
+ llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
+ break;
+ }
+ case OMPRTL__tgt_unregister_lib: {
+ // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
+ QualType ParamTy =
+ CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
+ llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
+ break;
+ }
}
return RTLFn;
}
@@ -1969,6 +2001,381 @@ enum KmpTaskTFields {
};
} // anonymous namespace
+bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
+ // FIXME: Add other entries type when they become supported.
+ return OffloadEntriesTargetRegion.empty();
+}
+
+/// \brief Initialize target region entry.
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, unsigned Order) {
+ assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
+ "only required for the device "
+ "code generation.");
+ OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
+ OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr);
+ ++OffloadingEntriesNum;
+}
+
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, llvm::Constant *Addr,
+ llvm::Constant *ID) {
+ // If we are emitting code for a target, the entry is already initialized,
+ // only has to be registered.
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
+ ColNum) &&
+ "Entry must exist.");
+ auto &Entry = OffloadEntriesTargetRegion[DeviceID][FileID][ParentName]
+ [LineNum][ColNum];
+ assert(Entry.isValid() && "Entry not initialized!");
+ Entry.setAddress(Addr);
+ Entry.setID(ID);
+ return;
+ } else {
+ OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID);
+ OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
+ Entry;
+ }
+}
+
+bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
+ unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
+ unsigned ColNum) const {
+ auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
+ if (PerDevice == OffloadEntriesTargetRegion.end())
+ return false;
+ auto PerFile = PerDevice->second.find(FileID);
+ if (PerFile == PerDevice->second.end())
+ return false;
+ auto PerParentName = PerFile->second.find(ParentName);
+ if (PerParentName == PerFile->second.end())
+ return false;
+ auto PerLine = PerParentName->second.find(LineNum);
+ if (PerLine == PerParentName->second.end())
+ return false;
+ auto PerColumn = PerLine->second.find(ColNum);
+ if (PerColumn == PerLine->second.end())
+ return false;
+ // Fail if this entry is already registered.
+ if (PerColumn->second.getAddress() || PerColumn->second.getID())
+ return false;
+ return true;
+}
+
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
+ const OffloadTargetRegionEntryInfoActTy &Action) {
+ // Scan all target region entries and perform the provided action.
+ for (auto &D : OffloadEntriesTargetRegion)
+ for (auto &F : D.second)
+ for (auto &P : F.second)
+ for (auto &L : P.second)
+ for (auto &C : L.second)
+ Action(D.first, F.first, P.first(), L.first, C.first, C.second);
+}
+
+/// \brief Create a Ctor/Dtor-like function whose body is emitted through
+/// \a Codegen. This is used to emit the two functions that register and
+/// unregister the descriptor of the current compilation unit.
+static llvm::Function *
+createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
+ const RegionCodeGenTy &Codegen) {
+ auto &C = CGM.getContext();
+ FunctionArgList Args;
+ ImplicitParamDecl DummyPtr(C, /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, C.VoidPtrTy);
+ Args.push_back(&DummyPtr);
+
+ CodeGenFunction CGF(CGM);
+ GlobalDecl();
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto *Fn =
+ CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, SourceLocation());
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args, SourceLocation());
+ Codegen(CGF);
+ CGF.FinishFunction();
+ return Fn;
+}
+
+llvm::Function *
+CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
+
+ // If we don't have entries or if we are emitting code for the device, we
+ // don't need to do anything.
+ if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
+ return nullptr;
+
+ auto &M = CGM.getModule();
+ auto &C = CGM.getContext();
+
+ // Get list of devices we care about
+ auto &Devices = CGM.getLangOpts().OMPTargetTriples;
+
+ // We should be creating an offloading descriptor only if there are devices
+ // specified.
+ assert(!Devices.empty() && "No OpenMP offloading devices??");
+
+ // Create the external variables that will point to the begin and end of the
+ // host entries section. These will be defined by the linker.
+ auto *OffloadEntryTy =
+ CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
+ llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
+ M, OffloadEntryTy, /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
+ ".omp_offloading.entries_begin");
+ llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
+ M, OffloadEntryTy, /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
+ ".omp_offloading.entries_end");
+
+ // Create all device images
+ llvm::SmallVector<llvm::Constant *, 4> DeviceImagesEntires;
+ auto *DeviceImageTy = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
+
+ for (unsigned i = 0; i < Devices.size(); ++i) {
+ StringRef T = Devices[i].getTriple();
+ auto *ImgBegin = new llvm::GlobalVariable(
+ M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, Twine(".omp_offloading.img_start.") + Twine(T));
+ auto *ImgEnd = new llvm::GlobalVariable(
+ M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, Twine(".omp_offloading.img_end.") + Twine(T));
+
+ llvm::Constant *Dev =
+ llvm::ConstantStruct::get(DeviceImageTy, ImgBegin, ImgEnd,
+ HostEntriesBegin, HostEntriesEnd, nullptr);
+ DeviceImagesEntires.push_back(Dev);
+ }
+
+ // Create device images global array.
+ llvm::ArrayType *DeviceImagesInitTy =
+ llvm::ArrayType::get(DeviceImageTy, DeviceImagesEntires.size());
+ llvm::Constant *DeviceImagesInit =
+ llvm::ConstantArray::get(DeviceImagesInitTy, DeviceImagesEntires);
+
+ llvm::GlobalVariable *DeviceImages = new llvm::GlobalVariable(
+ M, DeviceImagesInitTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, DeviceImagesInit,
+ ".omp_offloading.device_images");
+ DeviceImages->setUnnamedAddr(true);
+
+ // This is a Zero array to be used in the creation of the constant expressions
+ llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
+ llvm::Constant::getNullValue(CGM.Int32Ty)};
+
+ // Create the target region descriptor.
+ auto *BinaryDescriptorTy = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
+ llvm::Constant *TargetRegionsDescriptorInit = llvm::ConstantStruct::get(
+ BinaryDescriptorTy, llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
+ llvm::ConstantExpr::getGetElementPtr(DeviceImagesInitTy, DeviceImages,
+ Index),
+ HostEntriesBegin, HostEntriesEnd, nullptr);
+
+ auto *Desc = new llvm::GlobalVariable(
+ M, BinaryDescriptorTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, TargetRegionsDescriptorInit,
+ ".omp_offloading.descriptor");
+
+ // Emit code to register or unregister the descriptor at execution
+ // startup or closing, respectively.
+
+ // Create a variable to drive the registration and unregistration of the
+ // descriptor, so we can reuse the logic that emits Ctors and Dtors.
+ auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
+ ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
+ IdentInfo, C.CharTy);
+
+ auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
+ CGM, ".omp_offloading.descriptor_unreg", [&](CodeGenFunction &CGF) {
+ CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
+ Desc);
+ });
+ auto *RegFn = createOffloadingBinaryDescriptorFunction(
+ CGM, ".omp_offloading.descriptor_reg", [&](CodeGenFunction &CGF) {
+ CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_register_lib),
+ Desc);
+ CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
+ });
+ return RegFn;
+}
+
+void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *Addr, StringRef Name,
+ uint64_t Size) {
+ auto *TgtOffloadEntryType = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
+ llvm::LLVMContext &C = CGM.getModule().getContext();
+ llvm::Module &M = CGM.getModule();
+
+ // Make sure the address has the right type.
+ llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(Addr, CGM.VoidPtrTy);
+
+ // Create constant string with the name.
+ llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
+
+ llvm::GlobalVariable *Str =
+ new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, StrPtrInit,
+ ".omp_offloading.entry_name");
+ Str->setUnnamedAddr(true);
+ llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
+
+ // Create the entry struct.
+ llvm::Constant *EntryInit = llvm::ConstantStruct::get(
+ TgtOffloadEntryType, AddrPtr, StrPtr,
+ llvm::ConstantInt::get(CGM.SizeTy, Size), nullptr);
+ llvm::GlobalVariable *Entry = new llvm::GlobalVariable(
+ M, TgtOffloadEntryType, true, llvm::GlobalValue::ExternalLinkage,
+ EntryInit, ".omp_offloading.entry");
+
+ // The entry has to be created in the section the linker expects it to be.
+ Entry->setSection(".omp_offloading.entries");
+ // We can't have any padding between symbols, so we need to have 1-byte
+ // alignment.
+ Entry->setAlignment(1);
+ return;
+}
+
+void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
+ // Emit the offloading entries and metadata so that the device codegen side
+ // can easily figure out what to emit. The produced metadata looks like this:
+ //
+ // !omp_offload.info = !{!1, ...}
+ //
+ // Right now we only generate metadata for functions that contain target
+ // regions.
+
+ // If we do not have entries, we don't need to do anything.
+ if (OffloadEntriesInfoManager.empty())
+ return;
+
+ llvm::Module &M = CGM.getModule();
+ llvm::LLVMContext &C = M.getContext();
+ SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
+ OrderedEntries(OffloadEntriesInfoManager.size());
+
+ // Create the offloading info metadata node.
+ llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
+
+ // Auxiliary helpers to create metadata values and strings.
+ auto getMDInt = [&](unsigned v) {
+ return llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
+ };
+
+ auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
+
+ // Create a function that emits metadata for each target region entry.
+ auto &&TargetRegionMetadataEmitter = [&](
+ unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
+ unsigned Column,
+ OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
+ llvm::SmallVector<llvm::Metadata *, 32> Ops;
+ // Generate metadata for target regions. Each entry of this metadata
+ // contains:
+ // - Entry 0 -> Kind of this type of metadata (0).
+ // - Entry 1 -> Device ID of the file where the entry was identified.
+ // - Entry 2 -> File ID of the file where the entry was identified.
+ // - Entry 3 -> Mangled name of the function where the entry was identified.
+ // - Entry 4 -> Line in the file where the entry was identified.
+ // - Entry 5 -> Column in the file where the entry was identified.
+ // - Entry 6 -> Order the entry was created.
+ // The first element of the metadata node is the kind.
+ Ops.push_back(getMDInt(E.getKind()));
+ Ops.push_back(getMDInt(DeviceID));
+ Ops.push_back(getMDInt(FileID));
+ Ops.push_back(getMDString(ParentName));
+ Ops.push_back(getMDInt(Line));
+ Ops.push_back(getMDInt(Column));
+ Ops.push_back(getMDInt(E.getOrder()));
+
+ // Save this entry in the right position of the ordered entries array.
+ OrderedEntries[E.getOrder()] = &E;
+
+ // Add metadata to the named metadata node.
+ MD->addOperand(llvm::MDNode::get(C, Ops));
+ };
+
+ OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
+ TargetRegionMetadataEmitter);
+
+ for (auto *E : OrderedEntries) {
+ assert(E && "All ordered entries must exist!");
+ if (auto *CE =
+ dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
+ E)) {
+ assert(CE->getID() && CE->getAddress() &&
+ "Entry ID and Addr are invalid!");
+ createOffloadEntry(CE->getID(), CE->getAddress()->getName(), /*Size=*/0);
+ } else
+ llvm_unreachable("Unsupported entry kind.");
+ }
+}
+
+/// \brief Loads all the offload entries information from the host IR
+/// metadata.
+void CGOpenMPRuntime::loadOffloadInfoMetadata() {
+ // If we are in target mode, load the metadata from the host IR. This code has
+ // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
+
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return;
+
+ if (CGM.getLangOpts().OMPHostIRFile.empty())
+ return;
+
+ auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
+ if (Buf.getError())
+ return;
+
+ llvm::LLVMContext C;
+ auto ME = llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C);
+
+ if (ME.getError())
+ return;
+
+ llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
+ if (!MD)
+ return;
+
+ for (auto I : MD->operands()) {
+ llvm::MDNode *MN = cast<llvm::MDNode>(I);
+
+ auto getMDInt = [&](unsigned Idx) {
+ llvm::ConstantAsMetadata *V =
+ cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
+ return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
+ };
+
+ auto getMDString = [&](unsigned Idx) {
+ llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
+ return V->getString();
+ };
+
+ switch (getMDInt(0)) {
+ default:
+ llvm_unreachable("Unexpected metadata!");
+ break;
+ case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
+ OFFLOAD_ENTRY_INFO_TARGET_REGION:
+ OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
+ /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
+ /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
+ /*Column=*/getMDInt(5), /*Order=*/getMDInt(6));
+ break;
+ }
+ }
+}
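As an illustration of the seven-operand target-region schema documented in createOffloadEntriesAndInfoMetadata(), a node of !omp_offload.info can also be decoded outside of this class. The helper below is a hypothetical debugging aid, not part of the patch; it only assumes the operand layout described above and uses the same LLVM metadata accessors as loadOffloadInfoMetadata().

#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical dump of one !omp_offload.info operand that follows the
// target-region schema: kind, device ID, file ID, parent name, line,
// column, order.
static void dumpOffloadInfoNode(const llvm::MDNode *MN) {
  auto getMDInt = [&](unsigned Idx) {
    auto *V = llvm::cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
    return llvm::cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
  };
  auto getMDString = [&](unsigned Idx) {
    return llvm::cast<llvm::MDString>(MN->getOperand(Idx))->getString();
  };
  llvm::errs() << "kind=" << getMDInt(0) << " device=" << getMDInt(1)
               << " file=" << getMDInt(2) << " parent=" << getMDString(3)
               << " line=" << getMDInt(4) << " column=" << getMDInt(5)
               << " order=" << getMDInt(6) << "\n";
}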
+
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
if (!KmpRoutineEntryPtrTy) {
// Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
@@ -1992,6 +2399,80 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
return Field;
}
+QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
+
+ // Make sure the type of the entry is already created. This is the type we
+ // have to create:
+ // struct __tgt_offload_entry{
+ // void *addr; // Pointer to the offload entry info.
+ // // (function or global)
+ // char *name; // Name of the function or global.
+ // size_t size; // Size of the entry info (0 if it is a function).
+ // };
+ if (TgtOffloadEntryQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
+ addFieldToRecordDecl(C, RD, C.getSizeType());
+ RD->completeDefinition();
+ TgtOffloadEntryQTy = C.getRecordType(RD);
+ }
+ return TgtOffloadEntryQTy;
+}
+
+QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
+ // These are the types we need to build:
+ // struct __tgt_device_image{
+ // void *ImageStart; // Pointer to the target code start.
+ // void *ImageEnd; // Pointer to the target code end.
+ // // We also add the host entries to the device image, as it may be useful
+ // // for the target runtime to have access to that information.
+ // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
+ // // the entries.
+ // __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ // // entries (non inclusive).
+ // };
+ if (TgtDeviceImageQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_device_image");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ RD->completeDefinition();
+ TgtDeviceImageQTy = C.getRecordType(RD);
+ }
+ return TgtDeviceImageQTy;
+}
+
+QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
+ // struct __tgt_bin_desc{
+ // int32_t NumDevices; // Number of devices supported.
+ // __tgt_device_image *DeviceImages; // Arrays of device images
+ // // (one per device).
+ // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
+ // // entries.
+ // __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ // // entries (non inclusive).
+ // };
+ if (TgtBinaryDescriptorQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
+ RD->startDefinition();
+ addFieldToRecordDecl(
+ C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ RD->completeDefinition();
+ TgtBinaryDescriptorQTy = C.getRecordType(RD);
+ }
+ return TgtBinaryDescriptorQTy;
+}
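Putting the three implicit records above together, the host-side data built by the registration code corresponds roughly to the following plain C++ declarations. This is a sketch for orientation only; the authoritative layout is whatever the target ABI assigns to these fields.

#include <cstddef>
#include <cstdint>

struct __tgt_offload_entry {
  void *addr;       // Pointer to the offload entry info (function or global).
  char *name;       // Name of the function or global.
  std::size_t size; // Size of the entry info (0 if it is a function).
};

struct __tgt_device_image {
  void *ImageStart;                  // Pointer to the target code start.
  void *ImageEnd;                    // Pointer to the target code end.
  __tgt_offload_entry *EntriesBegin; // Begin of the host entry table.
  __tgt_offload_entry *EntriesEnd;   // End of the host entry table (exclusive).
};

struct __tgt_bin_desc {
  std::int32_t NumDevices;           // Number of devices supported.
  __tgt_device_image *DeviceImages;  // Array of device images (one per device).
  __tgt_offload_entry *EntriesBegin; // Begin of the host entry table.
  __tgt_offload_entry *EntriesEnd;   // End of the host entry table (exclusive).
};

The individual entries are emitted back to back, with 1-byte alignment, into the .omp_offloading.entries section, so the [EntriesBegin, EntriesEnd) range can be walked by the runtime as a plain array.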
+
namespace {
struct PrivateHelpersTy {
PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
@@ -3238,20 +3719,115 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
}
}
-llvm::Value *
-CGOpenMPRuntime::emitTargetOutlinedFunction(const OMPExecutableDirective &D,
- const RegionCodeGenTy &CodeGen) {
+/// \brief Obtain information that uniquely identifies a target entry. This
+/// consists of the file and device IDs as well as line and column numbers
+/// associated with the relevant entry source location.
+static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
+ unsigned &DeviceID, unsigned &FileID,
+ unsigned &LineNum, unsigned &ColumnNum) {
+
+ auto &SM = C.getSourceManager();
+
+ // The location should always be valid and have a file ID (the user cannot
+ // use #pragma directives in macros).
+
+ assert(Loc.isValid() && "Source location is expected to be always valid.");
+ assert(Loc.isFileID() && "Source location is expected to refer to a file.");
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ assert(PLoc.isValid() && "Source location is expected to be always valid.");
+
+ llvm::sys::fs::UniqueID ID;
+ if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ llvm_unreachable("Source file with target region no longer exists!");
+
+ DeviceID = ID.getDevice();
+ FileID = ID.getFile();
+ LineNum = PLoc.getLine();
+ ColumnNum = PLoc.getColumn();
+ return;
+}
+
+void CGOpenMPRuntime::emitTargetOutlinedFunction(
+ const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry) {
+
+ assert(!ParentName.empty() && "Invalid target region parent name!");
+
const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ // Emit target region as a standalone region.
+ auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
+ CGF.EmitStmt(CS.getCapturedStmt());
+ };
+
+ // Create a unique name for the proxy/entry function, using the source
+ // location information of the current target region. The name will be
+ // something like:
+ //
+ // .omp_offloading.DD_FFFF.PP.lBB.cCC
+ //
+ // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
+ // mangled name of the function that encloses the target region, BB is the
+ // line number of the target region, and CC is the column number of the target
+ // region.
+
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ unsigned Column;
+ getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
+ Line, Column);
+ SmallString<64> EntryFnName;
+ {
+ llvm::raw_svector_ostream OS(EntryFnName);
+ OS << ".omp_offloading" << llvm::format(".%x", DeviceID)
+ << llvm::format(".%x.", FileID) << ParentName << ".l" << Line << ".c"
+ << Column;
+ }
+
CodeGenFunction CGF(CGM, true);
- CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen);
+ CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- return CGF.GenerateOpenMPCapturedStmtFunction(CS);
+
+ OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
+
+ // If this target outlined function is not an offload entry, we don't need to
+ // register it.
+ if (!IsOffloadEntry)
+ return;
+
+ // The target region ID is used by the runtime library to identify the current
+ // target region, so it only has to be unique and not necessarily point to
+ // anything. It could be the pointer to the outlined function that implements
+ // the target region, but we aren't using that pointer so that the compiler
+ // doesn't need to keep it alive and can inline the host function if that
+ // proves worthwhile during optimization. On the other hand, if emitting code
+ // for the device, the ID has to be the function address so that it can be
+ // retrieved from the offloading entry and launched by the runtime library. We
+ // also mark the outlined function with external linkage when emitting code
+ // for the device, because these functions will be entry points to the device.
+
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
+ OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OutlinedFnID = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");
+
+ // Register the information for the entry associated with this target region.
+ OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ DeviceID, FileID, ParentName, Line, Column, OutlinedFn, OutlinedFnID);
+ return;
}
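With hypothetical inputs, the naming scheme described above yields entry names such as ".omp_offloading.801.12ab34._Z3foov.l42.c9". The standalone sketch below simply replays the same formatting logic for such made-up values and is not part of the patch.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Hypothetical unique-file info and source position of a target region.
  unsigned DeviceID = 0x801, FileID = 0x12ab34, Line = 42, Column = 9;
  llvm::StringRef ParentName = "_Z3foov"; // Mangled enclosing function name.

  llvm::SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << ".omp_offloading" << llvm::format(".%x", DeviceID)
       << llvm::format(".%x.", FileID) << ParentName << ".l" << Line << ".c"
       << Column;
  }
  // Prints: .omp_offloading.801.12ab34._Z3foov.l42.c9
  llvm::outs() << EntryFnName.str() << "\n";
  return 0;
}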
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
llvm::Value *OutlinedFn,
+ llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device,
ArrayRef<llvm::Value *> CapturedVars) {
if (!CGF.HaveInsertPoint())
@@ -3275,6 +3851,8 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
OMP_DEVICEID_UNDEF = -1,
};
+ assert(OutlinedFn && "Invalid outlined function!");
+
auto &Ctx = CGF.getContext();
// Fill up the arrays with the all the captured variables.
@@ -3373,7 +3951,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// Fill up the pointer arrays and transfer execution to the device.
auto &&ThenGen = [this, &Ctx, &BasePointers, &Pointers, &Sizes, &MapTypes,
- hasVLACaptures, Device, OffloadError,
+ hasVLACaptures, Device, OutlinedFnID, OffloadError,
OffloadErrorQType](CodeGenFunction &CGF) {
unsigned PointerNumVal = BasePointers.size();
llvm::Value *PointerNum = CGF.Builder.getInt32(PointerNumVal);
@@ -3504,10 +4082,8 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// compiler doesn't need to keep that, and could therefore inline the host
// function if proven worthwhile during optimization.
- llvm::Value *HostPtr = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage,
- llvm::Constant::getNullValue(CGM.Int8Ty), ".offload_hstptr");
+ // From this point on, we need to have an ID of the target region defined.
+ assert(OutlinedFnID && "Invalid outlined function ID!");
// Emit device ID if any.
llvm::Value *DeviceID;
@@ -3518,25 +4094,35 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);
llvm::Value *OffloadingArgs[] = {
- DeviceID, HostPtr, PointerNum, BasePointersArray,
- PointersArray, SizesArray, MapTypesArray};
+ DeviceID, OutlinedFnID, PointerNum, BasePointersArray,
+ PointersArray, SizesArray, MapTypesArray};
auto Return = CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target),
OffloadingArgs);
CGF.EmitStoreOfScalar(Return, OffloadError);
};
- if (IfCond) {
- // Notify that the host version must be executed.
- auto &&ElseGen = [this, OffloadError,
- OffloadErrorQType](CodeGenFunction &CGF) {
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
- OffloadError);
- };
- emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
+ // Notify that the host version must be executed.
+ auto &&ElseGen = [this, OffloadError,
+ OffloadErrorQType](CodeGenFunction &CGF) {
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
+ OffloadError);
+ };
+
+ // If we have a target function ID, it means that we need to support
+ // offloading; otherwise, we just execute on the host. We have to execute on
+ // the host regardless of the if-clause condition if, e.g., the user does not
+ // specify any target triples.
+ if (OutlinedFnID) {
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ThenGen(CGF);
+ }
} else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
- ThenGen(CGF);
+ ElseGen(CGF);
}
// Check the error code and execute the host version if required.
@@ -3553,3 +4139,120 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
return;
}
+
+void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
+ StringRef ParentName) {
+ if (!S)
+ return;
+
+ // If we find an OMP target directive, codegen the outlined function and
+ // register the result.
+ // FIXME: Add other directives with target when they become supported.
+ bool isTargetDirective = isa<OMPTargetDirective>(S);
+
+ if (isTargetDirective) {
+ auto *E = cast<OMPExecutableDirective>(S);
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ unsigned Column;
+ getTargetEntryUniqueInfo(CGM.getContext(), E->getLocStart(), DeviceID,
+ FileID, Line, Column);
+
+ // Is this a target region that should not be emitted as an entry point? If
+ // so, just signal that we are done with this target region.
+ if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(
+ DeviceID, FileID, ParentName, Line, Column))
+ return;
+
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ emitTargetOutlinedFunction(*E, ParentName, Fn, Addr,
+ /*isOffloadEntry=*/true);
+ assert(Fn && Addr && "Target region emission failed.");
+ return;
+ }
+
+ if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) {
+ if (!E->getAssociatedStmt())
+ return;
+
+ scanForTargetRegionsFunctions(
+ cast<CapturedStmt>(E->getAssociatedStmt())->getCapturedStmt(),
+ ParentName);
+ return;
+ }
+
+ // If this is a lambda function, look into its body.
+ if (auto *L = dyn_cast<LambdaExpr>(S))
+ S = L->getBody();
+
+ // Keep looking for target regions recursively.
+ for (auto *II : S->children())
+ scanForTargetRegionsFunctions(II, ParentName);
+
+ return;
+}
+
+bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
+ auto &FD = *cast<FunctionDecl>(GD.getDecl());
+
+ // If emitting code for the host, we do not process FD here. Instead we do
+ // the normal code generation.
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return false;
+
+ // Try to detect target regions in the function.
+ scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));
+
+ // We should not emit any function other than the ones created during the
+ // scanning. Therefore, we signal that this function is completely dealt
+ // with.
+ return true;
+}
+
+bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return false;
+
+ // Check if there are Ctors/Dtors in this declaration and look for target
+ // regions in it. We use the complete variant to produce the kernel name
+ // mangling.
+ QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
+ if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
+ for (auto *Ctor : RD->ctors()) {
+ StringRef ParentName =
+ CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
+ scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
+ }
+ auto *Dtor = RD->getDestructor();
+ if (Dtor) {
+ StringRef ParentName =
+ CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
+ scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
+ }
+ }
+
+ // If we are in target mode, we do not emit any global (declare target is not
+ // implemented yet). Therefore, we signal that GD was processed in this case.
+ return true;
+}
+
+bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
+ auto *VD = GD.getDecl();
+ if (isa<FunctionDecl>(VD))
+ return emitTargetFunctions(GD);
+
+ return emitTargetGlobalVariable(GD);
+}
+
+llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
+ // If we have offloading in the current module, we need to emit the entries
+ // now and register the offloading descriptor.
+ createOffloadEntriesAndInfoMetadata();
+
+ // Create and register the offloading binary descriptors. This is the main
+ // entity that captures all the information about offloading in the current
+ // compilation unit.
+ return createOffloadingBinaryDescriptorRegistration();
+}
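To make the overall flow concrete: the registration function returned here ends up as a global constructor, and the matching unregistration is installed as a destructor through registerGlobalDtor above. The rough host-side sketch below shows what the emitted pieces amount to; the descriptor, region-ID and fallback symbols are placeholders, and the __tgt_* prototypes follow the declarations referenced in this patch (the leading __tgt_target parameters are inferred from the call site in emitTargetCall).

#include <cstddef>
#include <cstdint>

// Runtime entry points, matching OMPRTL__tgt_target,
// OMPRTL__tgt_register_lib and OMPRTL__tgt_unregister_lib.
struct __tgt_bin_desc;
extern "C" void __tgt_register_lib(__tgt_bin_desc *Desc);
extern "C" void __tgt_unregister_lib(__tgt_bin_desc *Desc);
extern "C" std::int32_t __tgt_target(std::int32_t DeviceID, void *HostPtr,
                                     std::int32_t ArgNum, void **ArgsBase,
                                     void **Args, std::size_t *ArgSizes,
                                     std::int32_t *ArgTypes);

// Placeholders for what codegen emits: the binary descriptor, the per-region
// ID global (.omp_offload.region_id on the host) and the host fallback.
extern __tgt_bin_desc OffloadDescriptor;
extern char RegionID;
void hostVersionOfTargetRegion();

// Equivalent of the emitted ctor/dtor pair.
void omp_offloading_descriptor_reg() { __tgt_register_lib(&OffloadDescriptor); }
void omp_offloading_descriptor_unreg() {
  __tgt_unregister_lib(&OffloadDescriptor);
}

// Equivalent of a target call site: try the device, fall back to the host
// version if the runtime reports an error. -1 is the "undefined device" ID.
void runTargetRegion(std::int32_t NumArgs, void **ArgsBase, void **Args,
                     std::size_t *Sizes, std::int32_t *Types) {
  std::int32_t Err = __tgt_target(/*DeviceID=*/-1, &RegionID, NumArgs,
                                  ArgsBase, Args, Sizes, Types);
  if (Err)
    hostVersionOfTargetRegion();
}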
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 992f9a8805e2..6b04fbeb09b5 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -35,6 +35,7 @@ class Value;
namespace clang {
class Expr;
+class GlobalDecl;
class OMPExecutableDirective;
class VarDecl;
@@ -165,6 +166,10 @@ private:
// arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
// *arg_types);
OMPRTL__tgt_target,
+ // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
+ OMPRTL__tgt_register_lib,
+ // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
+ OMPRTL__tgt_unregister_lib,
};
/// \brief Values for bit flags used in the ident_t to describe the fields.
@@ -288,7 +293,181 @@ private:
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
+ /// \brief Type struct __tgt_offload_entry{
+ /// void *addr; // Pointer to the offload entry info.
+ /// // (function or global)
+ /// char *name; // Name of the function or global.
+ /// size_t size; // Size of the entry info (0 if it is a function).
+ /// };
+ QualType TgtOffloadEntryQTy;
+ /// struct __tgt_device_image{
+ /// void *ImageStart; // Pointer to the target code start.
+ /// void *ImageEnd; // Pointer to the target code end.
+ /// // We also add the host entries to the device image, as it may be useful
+ /// // for the target runtime to have access to that information.
+ /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all
+ /// // the entries.
+ /// __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ /// // entries (non inclusive).
+ /// };
+ QualType TgtDeviceImageQTy;
+ /// struct __tgt_bin_desc{
+ /// int32_t NumDevices; // Number of devices supported.
+ /// __tgt_device_image *DeviceImages; // Arrays of device images
+ /// // (one per device).
+ /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
+ /// // entries.
+ /// __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ /// // entries (non inclusive).
+ /// };
+ QualType TgtBinaryDescriptorQTy;
+ /// \brief Entity that registers the offloading constants that were emitted so
+ /// far.
+ class OffloadEntriesInfoManagerTy {
+ CodeGenModule &CGM;
+
+ /// \brief Number of entries registered so far.
+ unsigned OffloadingEntriesNum;
+
+ public:
+ /// \brief Base class of the entries info.
+ class OffloadEntryInfo {
+ public:
+ /// \brief Kind of a given entry. Currently, only target regions are
+ /// supported.
+ enum OffloadingEntryInfoKinds {
+ // Entry is a target region.
+ OFFLOAD_ENTRY_INFO_TARGET_REGION = 0,
+ // Invalid entry info.
+ OFFLOAD_ENTRY_INFO_INVALID = ~0u
+ };
+
+ OffloadEntryInfo() : Order(~0u), Kind(OFFLOAD_ENTRY_INFO_INVALID) {}
+ explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order)
+ : Order(Order), Kind(Kind) {}
+
+ bool isValid() const { return Order != ~0u; }
+ unsigned getOrder() const { return Order; }
+ OffloadingEntryInfoKinds getKind() const { return Kind; }
+ static bool classof(const OffloadEntryInfo *Info) { return true; }
+
+ protected:
+ // \brief Order this entry was emitted.
+ unsigned Order;
+
+ OffloadingEntryInfoKinds Kind;
+ };
+
+ /// \brief Return true if there are no entries defined.
+ bool empty() const;
+ /// \brief Return number of entries defined so far.
+ unsigned size() const { return OffloadingEntriesNum; }
+ OffloadEntriesInfoManagerTy(CodeGenModule &CGM)
+ : CGM(CGM), OffloadingEntriesNum(0) {}
+
+ ///
+ /// Target region entries related.
+ ///
+ /// \brief Target region entries info.
+ class OffloadEntryInfoTargetRegion : public OffloadEntryInfo {
+ // \brief Address of the entity that has to be mapped for offloading.
+ llvm::Constant *Addr;
+ // \brief Address that can be used as the ID of the entry.
+ llvm::Constant *ID;
+
+ public:
+ OffloadEntryInfoTargetRegion()
+ : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, ~0u),
+ Addr(nullptr), ID(nullptr) {}
+ explicit OffloadEntryInfoTargetRegion(unsigned Order,
+ llvm::Constant *Addr,
+ llvm::Constant *ID)
+ : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, Order),
+ Addr(Addr), ID(ID) {}
+
+ llvm::Constant *getAddress() const { return Addr; }
+ llvm::Constant *getID() const { return ID; }
+ void setAddress(llvm::Constant *V) {
+ assert(!Addr && "Address as been set before!");
+ Addr = V;
+ }
+ void setID(llvm::Constant *V) {
+ assert(!ID && "ID as been set before!");
+ ID = V;
+ }
+ static bool classof(const OffloadEntryInfo *Info) {
+ return Info->getKind() == OFFLOAD_ENTRY_INFO_TARGET_REGION;
+ }
+ };
+ /// \brief Initialize target region entry.
+ void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, unsigned Order);
+ /// \brief Register target region entry.
+ void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, llvm::Constant *Addr,
+ llvm::Constant *ID);
+ /// \brief Return true if a target region entry with the provided
+ /// information exists.
+ bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum) const;
+ /// \brief Applies action \a Action on all registered entries.
+ typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
+ unsigned, OffloadEntryInfoTargetRegion &)>
+ OffloadTargetRegionEntryInfoActTy;
+ void actOnTargetRegionEntriesInfo(
+ const OffloadTargetRegionEntryInfoActTy &Action);
+
+ private:
+ // Storage for the target region entries. It is indexed by device ID,
+ // file ID, parent function name, line number, and column number.
+ typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
+ OffloadEntriesTargetRegionPerColumn;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerColumn>
+ OffloadEntriesTargetRegionPerLine;
+ typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
+ OffloadEntriesTargetRegionPerParentName;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
+ OffloadEntriesTargetRegionPerFile;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
+ OffloadEntriesTargetRegionPerDevice;
+ typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
+ OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
+ };
+ OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
+
+ /// \brief Creates and registers the offloading binary descriptor for the
+ /// current compilation unit. The function that does the registration is
+ /// returned.
+ llvm::Function *createOffloadingBinaryDescriptorRegistration();
+
+ /// \brief Creates offloading entry for the provided address \a Addr,
+ /// name \a Name and size \a Size.
+ void createOffloadEntry(llvm::Constant *Addr, StringRef Name, uint64_t Size);
+
+ /// \brief Creates all the offload entries in the current compilation unit
+ /// along with the associated metadata.
+ void createOffloadEntriesAndInfoMetadata();
+
+ /// \brief Loads all the offload entries information from the host IR
+ /// metadata.
+ void loadOffloadInfoMetadata();
+ /// \brief Returns __tgt_offload_entry type.
+ QualType getTgtOffloadEntryQTy();
+
+ /// \brief Returns __tgt_device_image type.
+ QualType getTgtDeviceImageQTy();
+
+ /// \brief Returns __tgt_bin_desc type.
+ QualType getTgtBinaryDescriptorQTy();
+
+ /// \brief Start scanning from statement \a S and emit all target regions
+ /// found along the way.
+ /// \param S Starting statement.
+ /// \param ParentName Name of the function declaration that is being scanned.
+ void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// \brief Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
@@ -743,16 +922,24 @@ public:
/// \brief Emit outilined function for 'target' directive.
/// \param D Directive to emit.
- /// \param CodeGen Code generation sequence for the \a D directive.
- virtual llvm::Value *
- emitTargetOutlinedFunction(const OMPExecutableDirective &D,
- const RegionCodeGenTy &CodeGen);
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// An outlined function may not be an entry if, e.g., the if clause always
+ /// evaluates to false.
+ virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry);
/// \brief Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, an the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
+ /// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
@@ -760,9 +947,31 @@ public:
/// \param CapturedVars Values captured in the current region.
virtual void emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
- llvm::Value *OutlinedFn, const Expr *IfCond,
+ llvm::Value *OutlinedFn,
+ llvm::Value *OutlinedFnID, const Expr *IfCond,
const Expr *Device,
ArrayRef<llvm::Value *> CapturedVars);
+
+ /// \brief Emit the target regions enclosed in the function definition of
+ /// \a GD, or the function itself in case it is a valid device function.
+ /// Returns true if \a GD was dealt with successfully.
+ /// \param GD Function to scan.
+ virtual bool emitTargetFunctions(GlobalDecl GD);
+
+ /// \brief Emit the global variable if it is a valid device global variable.
+ /// Returns true if \a GD was dealt with successfully.
+ /// \param GD Variable declaration to emit.
+ virtual bool emitTargetGlobalVariable(GlobalDecl GD);
+
+ /// \brief Emit the global \a GD if it is meaningful for the target. Returns
+ /// true if it was emitted successfully.
+ /// \param GD Global to scan.
+ virtual bool emitTargetGlobal(GlobalDecl GD);
+
+ /// \brief Creates the offloading descriptor in the event any target region
+ /// was emitted in the current module and returns the function that registers
+ /// it.
+ virtual llvm::Function *emitRegistrationFunction();
};
} // namespace CodeGen
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index bcd2ac51929d..14917c20c535 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -1125,7 +1125,8 @@ emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
}
static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
- const OMPExecutableDirective &D) {
+ const OMPExecutableDirective &D,
+ bool IsMonotonic) {
if (!CGF.HaveInsertPoint())
return;
if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
@@ -1136,7 +1137,8 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
// In presence of finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
// dependences of 'safelen' iterations are possible.
- CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
+ if (!IsMonotonic)
+ CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
} else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
@@ -1149,11 +1151,12 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
}
}
-void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
+void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
+ bool IsMonotonic) {
// Walk clauses and process safelen/lastprivate.
- LoopStack.setParallel();
+ LoopStack.setParallel(!IsMonotonic);
LoopStack.setVectorizeEnable(true);
- emitSimdlenSafelenClause(*this, D);
+ emitSimdlenSafelenClause(*this, D, IsMonotonic);
}
void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
@@ -1255,12 +1258,10 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
-void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
- const OMPLoopDirective &S,
- OMPPrivateScope &LoopScope,
- bool Ordered, Address LB,
- Address UB, Address ST,
- Address IL, llvm::Value *Chunk) {
+void CodeGenFunction::EmitOMPForOuterLoop(
+ OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,
+ const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
auto &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
@@ -1378,13 +1379,10 @@ void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
// Generate !llvm.loop.parallel metadata for loads and stores for loops
// with dynamic/guided scheduling and without ordered clause.
- if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
- LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
- ScheduleKind == OMPC_SCHEDULE_guided) &&
- !Ordered);
- } else {
- EmitOMPSimdInit(S);
- }
+ if (!isOpenMPSimdDirective(S.getDirectiveKind()))
+ LoopStack.setParallel(!IsMonotonic);
+ else
+ EmitOMPSimdInit(S, IsMonotonic);
SourceLocation Loc = S.getLocStart();
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
@@ -1425,14 +1423,30 @@ static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
return CGF.EmitLValue(Helper);
}
-static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
+namespace {
+ struct ScheduleKindModifiersTy {
+ OpenMPScheduleClauseKind Kind;
+ OpenMPScheduleClauseModifier M1;
+ OpenMPScheduleClauseModifier M2;
+ ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
+ OpenMPScheduleClauseModifier M1,
+ OpenMPScheduleClauseModifier M2)
+ : Kind(Kind), M1(M1), M2(M2) {}
+ };
+} // namespace
+
+static std::pair<llvm::Value * /*Chunk*/, ScheduleKindModifiersTy>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
bool OuterRegion) {
// Detect the loop schedule kind and chunk.
auto ScheduleKind = OMPC_SCHEDULE_unknown;
+ OpenMPScheduleClauseModifier M1 = OMPC_SCHEDULE_MODIFIER_unknown;
+ OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
llvm::Value *Chunk = nullptr;
if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
ScheduleKind = C->getScheduleKind();
+ M1 = C->getFirstScheduleModifier();
+ M2 = C->getSecondScheduleModifier();
if (const auto *Ch = C->getChunkSize()) {
if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
if (OuterRegion) {
@@ -1454,7 +1468,7 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
}
}
- return std::make_pair(Chunk, ScheduleKind);
+ return std::make_pair(Chunk, ScheduleKindModifiersTy(ScheduleKind, M1, M2));
}
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
@@ -1530,16 +1544,21 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
auto ScheduleInfo =
emitScheduleClause(*this, S, /*OuterRegion=*/false);
Chunk = ScheduleInfo.first;
- ScheduleKind = ScheduleInfo.second;
+ ScheduleKind = ScheduleInfo.second.Kind;
+ const OpenMPScheduleClauseModifier M1 = ScheduleInfo.second.M1;
+ const OpenMPScheduleClauseModifier M2 = ScheduleInfo.second.M2;
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
+ // OpenMP 4.5, 2.7.1 Loop Construct, Description.
+ // If the static schedule kind is specified or if the ordered clause is
+ // specified, and if no monotonic modifier is specified, the effect will
+ // be as if the monotonic modifier was specified.
if (RT.isStaticNonchunked(ScheduleKind,
/* Chunked */ Chunk != nullptr) &&
!Ordered) {
- if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- EmitOMPSimdInit(S);
- }
+ if (isOpenMPSimdDirective(S.getDirectiveKind()))
+ EmitOMPSimdInit(S, /*IsMonotonic=*/true);
// OpenMP [2.7.1, Loop Construct, Description, table 2-1]
// When no chunk_size is specified, the iteration space is divided into
// chunks that are approximately equal in size, and at most one chunk is
@@ -1549,7 +1568,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
IVSize, IVSigned, Ordered,
IL.getAddress(), LB.getAddress(),
UB.getAddress(), ST.getAddress());
- auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
+ auto LoopExit =
+ getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB;
@@ -1566,9 +1586,14 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
// Tell the runtime we are done.
RT.emitForStaticFinish(*this, S.getLocStart());
} else {
+ const bool IsMonotonic = Ordered ||
+ ScheduleKind == OMPC_SCHEDULE_static ||
+ ScheduleKind == OMPC_SCHEDULE_unknown ||
+ M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
+ M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
- EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
+ EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LB.getAddress(), UB.getAddress(), ST.getAddress(),
IL.getAddress(), Chunk);
}
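The IsMonotonic computation above can be read as a small predicate over the schedule clause. The sketch below restates it for clarity, assuming the schedule kind/modifier enums this patch relies on, and lists a couple of example clauses in comments.

#include "clang/Basic/OpenMPKinds.h"

using namespace clang;

// Restates the condition used on the outer-loop path: ordered loops,
// static/unspecified schedules, or an explicit 'monotonic' modifier force
// monotonic behavior (OpenMP 4.5, 2.7.1).
static bool isMonotonicSchedule(bool Ordered, OpenMPScheduleClauseKind Kind,
                                OpenMPScheduleClauseModifier M1,
                                OpenMPScheduleClauseModifier M2) {
  return Ordered || Kind == OMPC_SCHEDULE_static ||
         Kind == OMPC_SCHEDULE_unknown ||
         M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
         M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
}
// Examples:
//   #pragma omp for ordered schedule(guided)            -> monotonic
//   #pragma omp for schedule(nonmonotonic: dynamic, 4)  -> not monotonic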
@@ -2546,14 +2571,8 @@ void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
GenerateOpenMPCapturedVars(CS, CapturedVars);
- // Emit target region as a standalone region.
- auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
- CGF.EmitStmt(CS.getCapturedStmt());
- };
-
- // Obtain the target region outlined function.
- llvm::Value *Fn =
- CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, CodeGen);
+ llvm::Function *Fn = nullptr;
+ llvm::Constant *FnID = nullptr;
// Check if we have any if clause associated with the directive.
const Expr *IfCond = nullptr;
@@ -2568,7 +2587,34 @@ void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
Device = C->getDevice();
}
- CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, IfCond, Device,
+ // Check if we have an if clause whose conditional always evaluates to false
+ // or if we do not have any targets specified. If so, the target region is not
+ // an offload entry point.
+ bool IsOffloadEntry = true;
+ if (IfCond) {
+ bool Val;
+ if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
+ IsOffloadEntry = false;
+ }
+ if (CGM.getLangOpts().OMPTargetTriples.empty())
+ IsOffloadEntry = false;
+
+ assert(CurFuncDecl && "No parent declaration for target region!");
+ StringRef ParentName;
+ // In case we have Ctors/Dtors we use the complete type variant to produce
+ // the mangling of the device outlined kernel.
+ if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
+ ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
+ else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
+ ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
+ else
+ ParentName =
+ CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
+
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
+ IsOffloadEntry);
+
+ CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
CapturedVars);
}
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index c8f3add67762..a40aab29be07 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -378,8 +378,8 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
// Apply the standard set of call attributes.
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv,
- /*AttrOnCallSite=*/true);
+ CGM.ConstructAttributeList(Callee->getName(), *CurFnInfo, MD, AttributeList,
+ CallingConv, /*AttrOnCallSite=*/true);
llvm::AttributeSet Attrs =
llvm::AttributeSet::get(getLLVMContext(), AttributeList);
Call->setAttributes(Attrs);
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index b4a9186462ec..b3d50352532e 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -2365,17 +2365,17 @@ private:
/// Helpers for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
- void EmitOMPSimdInit(const OMPLoopDirective &D);
+ void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
void EmitOMPSimdFinal(const OMPLoopDirective &D);
/// \brief Emit code for the worksharing loop-based directive.
/// \return true, if this construct has any lastprivate clause, false -
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
- const OMPLoopDirective &S,
- OMPPrivateScope &LoopScope, bool Ordered,
- Address LB, Address UB, Address ST,
- Address IL, llvm::Value *Chunk);
+ bool IsMonotonic, const OMPLoopDirective &S,
+ OMPPrivateScope &LoopScope, bool Ordered, Address LB,
+ Address UB, Address ST, Address IL,
+ llvm::Value *Chunk);
/// \brief Emit code for sections directive.
OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 173b0dcba1c2..536c55ae4e18 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -375,6 +375,10 @@ void CodeGenModule::Release() {
if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
AddGlobalDtor(CudaDtorFunction);
}
+ if (OpenMPRuntime)
+ if (llvm::Function *OpenMPRegistrationFunction =
+ OpenMPRuntime->emitRegistrationFunction())
+ AddGlobalCtor(OpenMPRegistrationFunction, 0);
if (PGOReader) {
getModule().setMaximumFunctionCount(PGOReader->getMaximumFunctionCount());
if (PGOStats.hasDiagnostics())
@@ -770,7 +774,8 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
llvm::Function *F) {
unsigned CallingConv;
AttributeListType AttributeList;
- ConstructAttributeList(Info, D, AttributeList, CallingConv, false);
+ ConstructAttributeList(F->getName(), Info, D, AttributeList, CallingConv,
+ false);
F->setAttributes(llvm::AttributeSet::get(getLLVMContext(), AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -1490,6 +1495,11 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
}
}
+ // If this is an OpenMP device, check if it is legal to emit this global
+ // normally.
+ if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
+ return;
+
// Ignore declarations, they will be emitted on their first use.
if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
// Forward declarations are emitted lazily on first use.
@@ -3596,6 +3606,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// File-scope asm is ignored during device-side CUDA compilation.
if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
break;
+ // File-scope asm is ignored during device-side OpenMP compilation.
+ if (LangOpts.OpenMPIsDevice)
+ break;
auto *AD = cast<FileScopeAsmDecl>(D);
getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
break;
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 33113837a4cf..fdb4d78b3558 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -966,13 +966,14 @@ public:
/// Get the LLVM attributes and calling convention to use for a particular
/// function type.
///
+ /// \param Name - The function name.
/// \param Info - The function type information.
/// \param CalleeInfo - The callee information these attributes are being
/// constructed for. If valid, the attributes applied to this decl may
/// contribute to the function attributes and calling convention.
/// \param PAL [out] - On return, the attribute list to use.
/// \param CallingConv [out] - On return, the LLVM calling convention to use.
- void ConstructAttributeList(const CGFunctionInfo &Info,
+ void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info,
CGCalleeInfo CalleeInfo, AttributeListType &PAL,
unsigned &CallingConv, bool AttrOnCallSite);
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index 38774332f31d..5ae861eab0d3 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -721,17 +721,7 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
if (!haveRegionCounts())
return;
- uint64_t MaxFunctionCount = PGOReader->getMaximumFunctionCount();
uint64_t FunctionCount = getRegionCount(nullptr);
- if (FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount))
- // Turn on InlineHint attribute for hot functions.
- // FIXME: 30% is from preliminary tuning on SPEC, it may not be optimal.
- Fn->addFnAttr(llvm::Attribute::InlineHint);
- else if (FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount))
- // Turn on Cold attribute for cold functions.
- // FIXME: 1% is from preliminary tuning on SPEC, it may not be optimal.
- Fn->addFnAttr(llvm::Attribute::Cold);
-
Fn->setEntryCount(FunctionCount);
}
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index eb6edeac4427..1d4d7099e194 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -993,24 +993,30 @@ void CoverageMappingModuleGen::emit() {
llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
+ llvm::Type *CovDataHeaderTypes[] = {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto CovDataHeaderTy =
+ llvm::StructType::get(Ctx, makeArrayRef(CovDataHeaderTypes));
+ llvm::Constant *CovDataHeaderVals[] = {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) Init,
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto CovDataHeaderVal = llvm::ConstantStruct::get(
+ CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
+
// Create the coverage data record
- llvm::Type *CovDataTypes[] = {Int32Ty, Int32Ty,
- Int32Ty, Int32Ty,
- RecordsTy, FilenamesAndMappingsVal->getType()};
+ llvm::Type *CovDataTypes[] = {CovDataHeaderTy, RecordsTy,
+ FilenamesAndMappingsVal->getType()};
auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
- llvm::Constant *TUDataVals[] = {
- llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()),
- llvm::ConstantInt::get(Int32Ty, FilenamesSize),
- llvm::ConstantInt::get(Int32Ty, CoverageMappingSize),
- llvm::ConstantInt::get(Int32Ty,
- /*Version=*/CoverageMappingVersion1),
- RecordsVal, FilenamesAndMappingsVal};
+ llvm::Constant *TUDataVals[] = {CovDataHeaderVal, RecordsVal,
+ FilenamesAndMappingsVal};
auto CovDataVal =
llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
- auto CovData = new llvm::GlobalVariable(CGM.getModule(), CovDataTy, true,
- llvm::GlobalValue::InternalLinkage,
- CovDataVal,
- llvm::getCoverageMappingVarName());
+ auto CovData = new llvm::GlobalVariable(
+ CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage,
+ CovDataVal, llvm::getCoverageMappingVarName());
CovData->setSection(getCoverageSection(CGM));
CovData->setAlignment(8);
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 4f8481c0beec..85bbcb4113ab 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -699,11 +699,11 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
}
void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
- // Since argumentsFitWithinSystemLimits() may underestimate system's capacity
+ // Since commandLineFitsWithinSystemLimits() may underestimate the system's capacity
// if the tool does not support response files, there is a chance/ that things
// will just work without a response file, so we silently just skip it.
if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
- llvm::sys::argumentsFitWithinSystemLimits(Cmd.getArguments()))
+ llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(), Cmd.getArguments()))
return;
std::string TmpName = GetTemporaryPath("response", "txt");
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index b02430e01763..7ece3216297f 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -2611,7 +2611,8 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
// Other standard paths
//----------------------------------------------------------------------------
std::vector<std::string> RootDirs;
- std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(), RootDirs.begin());
+ std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(),
+ std::back_inserter(RootDirs));
std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
D.PrefixDirs);
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 7a185dc0764d..84681053dd1a 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -939,7 +939,7 @@ static void getARMTargetFeatures(const ToolChain &TC,
Features.push_back("+reserve-r9");
// The kext linker doesn't know how to deal with movw/movt.
- if (KernelOrKext)
+ if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
Features.push_back("+no-movt");
}
@@ -2107,7 +2107,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" ||
- CPU == "cortex-a72" || CPU == "cortex-a35") {
+ CPU == "cortex-a72" || CPU == "cortex-a35" || CPU == "exynos-m1") {
Features.push_back("+neon");
Features.push_back("+crc");
Features.push_back("+crypto");
@@ -3592,6 +3592,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!IsWindowsMSVC)
CmdArgs.push_back("-analyzer-checker=unix");
+ // Disable some unix checkers for PS4.
+ if (IsPS4CPU) {
+ CmdArgs.push_back("-analyzer-disable-checker=unix.API");
+ CmdArgs.push_back("-analyzer-disable-checker=unix.Vfork");
+ }
+
if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
CmdArgs.push_back("-analyzer-checker=osx");
@@ -3600,14 +3606,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (types::isCXX(Input.getType()))
CmdArgs.push_back("-analyzer-checker=cplusplus");
- // Enable the following experimental checkers for testing.
- CmdArgs.push_back(
- "-analyzer-checker=security.insecureAPI.UncheckedReturn");
- CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
- CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
- CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
- CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
- CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
+ if (!IsPS4CPU) {
+ CmdArgs.push_back(
+ "-analyzer-checker=security.insecureAPI.UncheckedReturn");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
+ }
// Default nullability checks.
CmdArgs.push_back("-analyzer-checker=nullability.NullPassedToNonnull");
@@ -4733,11 +4740,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->render(Args, CmdArgs);
}
- // -fbuiltin is default unless -mkernel is used
- if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
- !Args.hasArg(options::OPT_mkernel)))
+ // -fbuiltin is default unless -mkernel is used.
+ bool UseBuiltins =
+ Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
+ !Args.hasArg(options::OPT_mkernel));
+ if (!UseBuiltins)
CmdArgs.push_back("-fno-builtin");
+ // -ffreestanding implies -fno-builtin.
+ if (Args.hasArg(options::OPT_ffreestanding))
+ UseBuiltins = false;
+
+ // Process the -fno-builtin-* options.
+ for (const auto &Arg : Args) {
+ const Option &O = Arg->getOption();
+ if (!O.matches(options::OPT_fno_builtin_))
+ continue;
+
+ Arg->claim();
+ // If -fno-builtin is specified, then there's no need to pass the option to
+ // the frontend.
+ if (!UseBuiltins)
+ continue;
+
+ StringRef FuncName = Arg->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-fno-builtin-" + FuncName));
+ }
+
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
options::OPT_fno_assume_sane_operator_new))
CmdArgs.push_back("-fno-assume-sane-operator-new");
@@ -6179,6 +6208,11 @@ void gcc::Compiler::RenderExtraToolArgs(const JobAction &JA,
case types::TY_LTO_BC:
CmdArgs.push_back("-c");
break;
+ // We assume we've got an "integrated" assembler in that gcc will produce an
+ // object file itself.
+ case types::TY_Object:
+ CmdArgs.push_back("-c");
+ break;
case types::TY_PP_Asm:
CmdArgs.push_back("-S");
break;
@@ -9473,6 +9507,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
A.renderAsInput(Args, CmdArgs);
}
+ TC.addProfileRTLibs(Args, CmdArgs);
+
// We need to special case some linker paths. In the case of lld, we need to
// translate 'lld' into 'lld-link', and in the case of the regular msvc
// linker, we need to use a special search algorithm.
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 314315dea006..168662f7e7fe 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -149,6 +149,10 @@ public:
Common(const char *Name, const char *ShortName, const ToolChain &TC)
: GnuTool(Name, ShortName, TC) {}
+ // A gcc tool has an "integrated" assembler that it will call to produce an
+ // object. Let it use that assembler so that we don't have to deal with
+ // assembly syntax incompatibilities.
+ bool hasIntegratedAssembler() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index 41451b91f881..8faab2869de6 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -38,6 +38,12 @@ static unsigned getLengthToMatchingParen(const FormatToken &Tok) {
return End->TotalLength - Tok.TotalLength + 1;
}
+static unsigned getLengthToNextOperator(const FormatToken &Tok) {
+ if (!Tok.NextOperator)
+ return 0;
+ return Tok.NextOperator->TotalLength - Tok.TotalLength;
+}
+
// Returns \c true if \c Tok is the "." or "->" of a call and starts the next
// segment of a builder type call.
static bool startsSegmentOfBuilderTypeCall(const FormatToken &Tok) {
@@ -153,7 +159,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
!Current.isOneOf(tok::r_paren, tok::r_brace))
return true;
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
- Previous.is(TT_ArrayInitializerLSquare)) &&
+ (Previous.is(TT_ArrayInitializerLSquare) &&
+ Previous.ParameterCount > 1)) &&
Style.ColumnLimit > 0 &&
getLengthToMatchingParen(Previous) + State.Column - 1 >
getColumnLimit(State))
@@ -170,9 +177,13 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
unsigned NewLineColumn = getNewLineColumn(State);
- if (State.Column < NewLineColumn)
+ if (State.Column <= NewLineColumn)
return false;
+ if (Current.isMemberAccess() &&
+ State.Column + getLengthToNextOperator(Current) > Style.ColumnLimit)
+ return true;
+
if (Style.AlwaysBreakBeforeMultilineStrings &&
(NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
Previous.is(tok::comma) || Current.NestingLevel < 2) &&
@@ -246,8 +257,10 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
return true;
- if (Current.is(tok::lessless) && Previous.is(tok::identifier) &&
- Previous.TokenText == "endl")
+ if (Current.is(tok::lessless) &&
+ ((Previous.is(tok::identifier) && Previous.TokenText == "endl") ||
+ (Previous.Tok.isLiteral() && (Previous.TokenText.endswith("\\n\"") ||
+ Previous.TokenText == "\'\\n\'"))))
return true;
return false;
@@ -316,16 +329,16 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Current.is(TT_SelectorName) &&
!State.Stack.back().ObjCSelectorNameFound) {
+ unsigned MinIndent =
+ std::max(State.FirstIndent + Style.ContinuationIndentWidth,
+ State.Stack.back().Indent);
+ unsigned FirstColonPos = State.Column + Spaces + Current.ColumnWidth;
if (Current.LongestObjCSelectorName == 0)
State.Stack.back().AlignColons = false;
- else if (State.Stack.back().Indent + Current.LongestObjCSelectorName >
- State.Column + Spaces + Current.ColumnWidth)
- State.Stack.back().ColonPos =
- std::max(State.FirstIndent + Style.ContinuationIndentWidth,
- State.Stack.back().Indent) +
- Current.LongestObjCSelectorName;
+ else if (MinIndent + Current.LongestObjCSelectorName > FirstColonPos)
+ State.Stack.back().ColonPos = MinIndent + Current.LongestObjCSelectorName;
else
- State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth;
+ State.Stack.back().ColonPos = FirstColonPos;
}
// In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
@@ -377,7 +390,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
TT_CtorInitializerColon)) &&
((Previous.getPrecedence() != prec::Assignment &&
(Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
- !Previous.LastOperator)) ||
+ Previous.NextOperator)) ||
Current.StartsBinaryExpression)) {
// Always indent relative to the RHS of the expression unless this is a
// simple assignment without binary expression on the RHS. Also indent
@@ -692,7 +705,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
std::min(State.LowestLevelOnLine, Current.NestingLevel);
if (Current.isMemberAccess())
State.Stack.back().StartOfFunctionCall =
- Current.LastOperator ? 0 : State.Column;
+ !Current.NextOperator ? 0 : State.Column;
if (Current.is(TT_SelectorName)) {
State.Stack.back().ObjCSelectorNameFound = true;
if (Style.IndentWrappedFunctionNames) {
@@ -728,7 +741,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// }, a, b, c);
if (Current.isNot(tok::comment) && Previous &&
Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
- State.Stack.size() > 1) {
+ !Previous->is(TT_DictLiteral) && State.Stack.size() > 1) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].NoLineBreak = true;
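
A rough illustration of the member-access rule added above, assuming the default LLVM style with an 80-column limit; the type and method names are made up, and the wrapping shown is the intended shape rather than guaranteed output:

  // Once the next operator in the chain would run past the column limit,
  // the break is placed before the "." instead of inside the trailing call.
  struct Builder {
    Builder &withAVeryLongOptionName(int Value);
    Builder &withAnotherVeryLongOptionName(int Value);
    int build();
  };
  int configure(Builder &TheBuilder) {
    return TheBuilder.withAVeryLongOptionName(1)
        .withAnotherVeryLongOptionName(2)
        .build();
  }
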
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 63af0d6088d1..d6cd450d892e 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -218,10 +218,12 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemBegin = ItemEnd->Next;
}
- // Don't use column layout for nested lists, lists with few elements and in
- // presence of separating comments.
- if ((Token->NestingLevel != 0 && Token->is(tok::l_brace)) ||
- Commas.size() < 5 || HasSeparatingComment)
+ // Don't use column layout for lists with few elements and in presence of
+ // separating comments.
+ if (Commas.size() < 5 || HasSeparatingComment)
+ return;
+
+ if (Token->NestingLevel != 0 && Token->is(tok::l_brace) && Commas.size() < 19)
return;
// We can never place more than ColumnLimit / 3 items in a row (because of the
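
A sketch of the kind of initializer the relaxed limit above is aimed at; the data is made up. A braced list nested inside another list now needs at least 19 commas before column layout is considered, so a long inner table like this stays eligible while short nested lists do not:

  const int kSquares[2][20] = {
      {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256,
       289, 324, 361},
      {400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089,
       1156, 1225, 1296, 1369, 1444, 1521}};
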
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 78bc0edc45c0..b683660f350a 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -248,9 +248,9 @@ struct FormatToken {
/// with the same precedence, contains the 0-based operator index.
unsigned OperatorIndex = 0;
- /// \brief Is this the last operator (or "."/"->") in a sequence of operators
- /// with the same precedence?
- bool LastOperator = false;
+ /// \brief If this is an operator (or "."/"->") in a sequence of operators
+ /// with the same precedence, points to the next operator.
+ FormatToken *NextOperator = nullptr;
/// \brief Is this token part of a \c DeclStmt defining multiple variables?
///
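
A minimal sketch of how the new field can be walked, in the spirit of getLengthToNextOperator above; this helper is hypothetical (not part of the patch) and assumes lib/Format/FormatToken.h has been included and the annotator has already linked the chain:

  // Follows the singly linked chain of same-precedence operators starting at
  // Tok and returns the TotalLength of the last operator in the chain.
  static unsigned lengthUpToLastOperator(const clang::format::FormatToken &Tok) {
    unsigned Length = Tok.TotalLength;
    for (const clang::format::FormatToken *Op = Tok.NextOperator; Op;
         Op = Op->NextOperator)
      Length = Op->TotalLength;
    return Length;
  }
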
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index c3ea935e727b..caff1312f300 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -285,10 +285,13 @@ private:
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->Type = TT_JsComputedPropertyName;
- } else if (Parent &&
- Parent->isOneOf(tok::at, tok::equal, tok::comma, tok::l_paren,
- tok::l_square, tok::question, tok::colon,
- tok::kw_return)) {
+ } else if (Style.Language == FormatStyle::LK_Proto ||
+ (Parent &&
+ Parent->isOneOf(TT_BinaryOperator, tok::at, tok::comma,
+ tok::l_paren, tok::l_square, tok::question,
+ tok::colon, tok::kw_return,
+ // Should only be relevant to JavaScript:
+ tok::kw_default))) {
Left->Type = TT_ArrayInitializerLSquare;
} else {
BindingIncrease = 10;
@@ -458,16 +461,16 @@ private:
break;
}
}
- if (Contexts.back().ColonIsDictLiteral) {
+ if (Contexts.back().ColonIsDictLiteral ||
+ Style.Language == FormatStyle::LK_Proto) {
Tok->Type = TT_DictLiteral;
} else if (Contexts.back().ColonIsObjCMethodExpr ||
Line.startsWith(TT_ObjCMethodSpecifier)) {
Tok->Type = TT_ObjCMethodExpr;
Tok->Previous->Type = TT_SelectorName;
if (Tok->Previous->ColumnWidth >
- Contexts.back().LongestObjCSelectorName) {
+ Contexts.back().LongestObjCSelectorName)
Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth;
- }
if (!Contexts.back().FirstObjCSelectorName)
Contexts.back().FirstObjCSelectorName = Tok->Previous;
} else if (Contexts.back().ColonIsForRangeExpr) {
@@ -1327,6 +1330,8 @@ public:
} else {
// Operator found.
if (CurrentPrecedence == Precedence) {
+ if (LatestOperator)
+ LatestOperator->NextOperator = Current;
LatestOperator = Current;
Current->OperatorIndex = OperatorIndex;
++OperatorIndex;
@@ -1336,7 +1341,7 @@ public:
}
if (LatestOperator && (Current || Precedence > 0)) {
- LatestOperator->LastOperator = true;
if (Precedence == PrecedenceArrowAndPeriod) {
// Call expressions don't have a binary operator precedence.
addFakeParenthesis(Start, prec::Unknown);
@@ -1715,11 +1720,13 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
Right.Next->is(TT_DictLiteral)))
return 1;
if (Right.is(tok::l_square)) {
- if (Style.Language == FormatStyle::LK_Proto || Left.is(tok::r_square))
+ if (Style.Language == FormatStyle::LK_Proto)
return 1;
+ if (Left.is(tok::r_square))
+ return 25;
// Slightly prefer formatting local lambda definitions like functions.
if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
- return 50;
+ return 35;
if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare))
return 500;
@@ -1766,7 +1773,15 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// which might otherwise be blown up onto many lines. Here, clang-format
// won't produce "hanging" indents anyway as there is no other trailing
// call.
- return Right.LastOperator ? 150 : 40;
+ //
+    // Also apply a higher penalty if this is not a call, as that might lead
+    // to a wrapping like:
+ //
+ // aaaaaaa
+ // .aaaaaaaaa.bbbbbbbb(cccccccc);
+ return !Right.NextOperator || !Right.NextOperator->Previous->closesScope()
+ ? 150
+ : 35;
}
if (Right.is(TT_TrailingAnnotation) &&
@@ -1818,7 +1833,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Right.is(tok::lessless)) {
if (Left.is(tok::string_literal) &&
- (!Right.LastOperator || Right.OperatorIndex != 1)) {
+ (Right.NextOperator || Right.OperatorIndex != 1)) {
StringRef Content = Left.TokenText;
if (Content.startswith("\""))
Content = Content.drop_front(1);
@@ -1875,6 +1890,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
tok::numeric_constant, tok::l_paren, tok::l_brace,
tok::kw_true, tok::kw_false))
return false;
+ if (Left.is(tok::colon))
+ return !Left.is(TT_ObjCMethodExpr);
if (Left.is(tok::coloncolon))
return false;
if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
@@ -1925,8 +1942,6 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare) &&
!Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
return false;
- if (Left.is(tok::colon))
- return !Left.is(TT_ObjCMethodExpr);
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
return !Left.Children.empty(); // No spaces in "{}".
if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
@@ -1996,6 +2011,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.isOneOf(Keywords.kw_let, Keywords.kw_var, TT_JsFatArrow,
Keywords.kw_in))
return true;
+ if (Left.is(tok::kw_default) && Left.Previous &&
+ Left.Previous->is(tok::kw_export))
+ return true;
if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
return true;
if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
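
An illustration of the stream-wrapping rule added above (a "<<" that follows "endl" or a literal ending in a newline must start a new line); the snippet only shows the expected shape of the output under the default style:

  #include <iostream>

  void printReport(int Total, int Failed) {
    std::cout << "total: " << Total << "\n"
              << "failed: " << Failed << "\n"
              << "done\n";
  }
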
diff --git a/lib/Frontend/CodeGenOptions.cpp b/lib/Frontend/CodeGenOptions.cpp
index 75ee47f86806..50bb9f951be4 100644
--- a/lib/Frontend/CodeGenOptions.cpp
+++ b/lib/Frontend/CodeGenOptions.cpp
@@ -21,4 +21,12 @@ CodeGenOptions::CodeGenOptions() {
memcpy(CoverageVersion, "402*", 4);
}
+bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
+ StringRef FuncName(Name);
+ for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
+ if (FuncName.equals(NoBuiltinFuncs[i]))
+ return true;
+ return false;
+}
+
} // end namespace clang
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index d3870424b6bb..3a32f476566c 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -8,13 +8,14 @@
//===----------------------------------------------------------------------===//
#include "TestModuleFileExtension.h"
-#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Util.h"
+#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/LangStandard.h"
#include "clang/Frontend/Utils.h"
@@ -135,6 +136,20 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
}
}
+static void getAllNoBuiltinFuncValues(ArgList &Args,
+ std::vector<std::string> &Funcs) {
+ SmallVector<const char *, 8> Values;
+ for (const auto &Arg : Args) {
+ const Option &O = Arg->getOption();
+ if (O.matches(options::OPT_fno_builtin_)) {
+ const char *FuncName = Arg->getValue();
+ if (Builtin::Context::isBuiltinFunc(FuncName))
+ Values.push_back(FuncName);
+ }
+ }
+ Funcs.insert(Funcs.end(), Values.begin(), Values.end());
+}
+
static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
using namespace options;
@@ -399,18 +414,29 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
- Opts.setDebugInfo(
- llvm::StringSwitch<CodeGenOptions::DebugInfoKind>(A->getValue())
+ unsigned Val =
+ llvm::StringSwitch<unsigned>(A->getValue())
.Case("line-tables-only", CodeGenOptions::DebugLineTablesOnly)
.Case("limited", CodeGenOptions::LimitedDebugInfo)
- .Case("standalone", CodeGenOptions::FullDebugInfo));
+ .Case("standalone", CodeGenOptions::FullDebugInfo)
+ .Default(~0U);
+ if (Val == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ else
+ Opts.setDebugInfo(static_cast<CodeGenOptions::DebugInfoKind>(Val));
}
if (Arg *A = Args.getLastArg(OPT_debugger_tuning_EQ)) {
- Opts.setDebuggerTuning(
- llvm::StringSwitch<CodeGenOptions::DebuggerKind>(A->getValue())
- .Case("gdb", CodeGenOptions::DebuggerKindGDB)
- .Case("lldb", CodeGenOptions::DebuggerKindLLDB)
- .Case("sce", CodeGenOptions::DebuggerKindSCE));
+ unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("gdb", CodeGenOptions::DebuggerKindGDB)
+ .Case("lldb", CodeGenOptions::DebuggerKindLLDB)
+ .Case("sce", CodeGenOptions::DebuggerKindSCE)
+ .Default(~0U);
+ if (Val == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ else
+ Opts.setDebuggerTuning(static_cast<CodeGenOptions::DebuggerKind>(Val));
}
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
@@ -441,6 +467,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.OptimizeSize = getOptimizationLevelSize(Args);
Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
Args.hasArg(OPT_ffreestanding));
+ if (Opts.SimplifyLibCalls)
+ getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
Opts.UnrollLoops =
Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops,
(Opts.OptimizationLevel > 1 && !Opts.OptimizeSize));
@@ -1658,6 +1686,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
+ if (!Opts.NoBuiltin)
+ getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin);
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation);
@@ -1784,6 +1814,30 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.OpenMP = Args.hasArg(options::OPT_fopenmp);
Opts.OpenMPUseTLS =
Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
+ Opts.OpenMPIsDevice =
+ Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);
+
+ // Get the OpenMP target triples if any.
+ if (Arg *A = Args.getLastArg(options::OPT_omptargets_EQ)) {
+
+ for (unsigned i = 0; i < A->getNumValues(); ++i) {
+ llvm::Triple TT(A->getValue(i));
+
+ if (TT.getArch() == llvm::Triple::UnknownArch)
+ Diags.Report(clang::diag::err_drv_invalid_omp_target) << A->getValue(i);
+ else
+ Opts.OMPTargetTriples.push_back(TT);
+ }
+ }
+
+  // Get the OpenMP host IR file path, if any, and report an error if the
+  // file does not exist.
+ if (Arg *A = Args.getLastArg(options::OPT_omp_host_ir_file_path)) {
+ Opts.OMPHostIRFile = A->getValue();
+ if (!llvm::sys::fs::exists(Opts.OMPHostIRFile))
+ Diags.Report(clang::diag::err_drv_omp_host_ir_file_not_found)
+ << Opts.OMPHostIRFile;
+ }
// Record whether the __DEPRECATED define was requested.
Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
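
A standalone sketch of the triple validation applied to each -omptargets= value above; the helper name is made up and only the LLVM Triple API is assumed:

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/Triple.h"

  // A value is accepted only if it parses to a triple with a known architecture.
  static bool isRecognizedOffloadTriple(llvm::StringRef TripleStr) {
    llvm::Triple TT(TripleStr);
    return TT.getArch() != llvm::Triple::UnknownArch;
  }
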
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 9393f69d41fa..bbe0688be650 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -12,6 +12,7 @@ set(files
avx512vlintrin.h
avx512dqintrin.h
avx512vldqintrin.h
+ pkuintrin.h
avxintrin.h
bmi2intrin.h
bmiintrin.h
diff --git a/lib/Headers/ia32intrin.h b/lib/Headers/ia32intrin.h
index 5adf3f1f5d83..b2f82bb59e38 100644
--- a/lib/Headers/ia32intrin.h
+++ b/lib/Headers/ia32intrin.h
@@ -32,50 +32,26 @@
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__readeflags(void)
{
- unsigned long long __res = 0;
- __asm__ __volatile__ ("pushf\n\t"
- "popq %0\n"
- :"=r"(__res)
- :
- :
- );
- return __res;
+ return __builtin_ia32_readeflags_u64();
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__writeeflags(unsigned long long __f)
{
- __asm__ __volatile__ ("pushq %0\n\t"
- "popf\n"
- :
- :"r"(__f)
- :"flags"
- );
+ __builtin_ia32_writeeflags_u64(__f);
}
#else /* !__x86_64__ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__readeflags(void)
{
- unsigned int __res = 0;
- __asm__ __volatile__ ("pushf\n\t"
- "popl %0\n"
- :"=r"(__res)
- :
- :
- );
- return __res;
+ return __builtin_ia32_readeflags_u32();
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__writeeflags(unsigned int __f)
{
- __asm__ __volatile__ ("pushl %0\n\t"
- "popf\n"
- :
- :"r"(__f)
- :"flags"
- );
+ __builtin_ia32_writeeflags_u32(__f);
}
#endif /* !__x86_64__ */
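
A usage sketch for the rewritten intrinsics; they are normally reached through <x86intrin.h> and only make sense when targeting x86:

  #include <stdio.h>
  #include <x86intrin.h>

  int main(void) {
    // Read the flags register (RFLAGS on x86-64, EFLAGS on 32-bit x86) and
    // write the same value straight back; both calls now lower to builtins.
    unsigned long long Flags = __readeflags();
    __writeeflags(Flags);
    printf("flags: %#llx\n", Flags);
    return 0;
  }
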
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index f3c6d1914d61..637646122653 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -79,6 +79,8 @@ _mm256_cvtph_ps(__m128i __a)
#include <avx512erintrin.h>
+#include <pkuintrin.h>
+
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
diff --git a/lib/Headers/pkuintrin.h b/lib/Headers/pkuintrin.h
new file mode 100644
index 000000000000..ad123481cf1e
--- /dev/null
+++ b/lib/Headers/pkuintrin.h
@@ -0,0 +1,48 @@
+/*===------------- pkuintrin.h - PKU intrinsics ------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+ return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+  __builtin_ia32_wrpkru(__val);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
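
A usage sketch for the new header, assuming a CPU and OS with protection-key support and compilation with -mpku; the helper below sets the access-disable bit for one key in PKRU:

  #include <immintrin.h>

  // PKRU holds two bits per protection key: bit 2*key disables data accesses,
  // bit 2*key+1 disables writes.
  static unsigned int disableAccessForKey(unsigned int Key) {
    unsigned int PKRU = _rdpkru_u32(); // read the current PKRU value
    PKRU |= 1u << (2 * Key);           // set the access-disable bit for Key
    _wrpkru(PKRU);                     // write it back
    return PKRU;
  }
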
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 490bd5ada62d..1fd98c140e0e 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -1334,8 +1334,23 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ExprError();
}
+ // Check to see whether Res is a function designator only. If it is and we
+ // are compiling for OpenCL, we need to return an error as this implies
+  // that the address of the function is being taken, which is illegal in
+  // OpenCL.
+
// These can be followed by postfix-expr pieces.
- return ParsePostfixExpressionSuffix(Res);
+ Res = ParsePostfixExpressionSuffix(Res);
+ if (getLangOpts().OpenCL)
+ if (Expr *PostfixExpr = Res.get()) {
+ QualType Ty = PostfixExpr->getType();
+ if (!Ty.isNull() && Ty->isFunctionType()) {
+ Diag(PostfixExpr->getExprLoc(),
+ diag::err_opencl_taking_function_address_parser);
+ return ExprError();
+ }
+ }
+
+ return Res;
}
/// \brief Once the leading part of a postfix-expression is parsed, this
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index f469a064f896..142b473755de 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -241,9 +241,8 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
ConsumeToken(); // Consume the period.
IdentifierInfo *Id = Tok.getIdentifierInfo();
ConsumeToken(); // Consume the identifier.
- unsigned OffsetUnused;
- Result = Actions.LookupInlineAsmVarDeclField(
- Result.get(), Id->getName(), OffsetUnused, Info, Tok.getLocation());
+ Result = Actions.LookupInlineAsmVarDeclField(Result.get(), Id->getName(),
+ Info, Tok.getLocation());
}
// Figure out how many tokens we are into LineToks.
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 59d51f7e84ca..cbdcb5e48391 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -6983,7 +6983,7 @@ void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,
SmallString<16> PrettyTargetValue;
if (T->isSpecificBuiltinType(BuiltinType::Bool))
- PrettyTargetValue = IntegerValue == 0 ? "false" : "true";
+ PrettyTargetValue = Value.isZero() ? "false" : "true";
else
IntegerValue.toString(PrettyTargetValue);
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 2c5516a48d64..f27fb2b10712 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -10911,12 +10911,8 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// from the translation unit and reattach to the current context.
if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
// Is the decl actually in the context?
- for (const auto *DI : Context.getTranslationUnitDecl()->decls()) {
- if (DI == D) {
- Context.getTranslationUnitDecl()->removeDecl(D);
- break;
- }
- }
+ if (Context.getTranslationUnitDecl()->containsDecl(D))
+ Context.getTranslationUnitDecl()->removeDecl(D);
// Either way, reassign the lexical decl context to our FunctionDecl.
D->setLexicalDeclContext(CurContext);
}
@@ -12281,16 +12277,35 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
-
- // FIXME: In the future, return a variant or some other clue
- // for the consumer of this Decl to know it doesn't own it.
- // For our current ASTs this shouldn't be a problem, but will
- // need to be changed with DeclGroups.
- if (!Attr &&
- ((TUK == TUK_Reference &&
- (!PrevTagDecl->getFriendObjectKind() || getLangOpts().MicrosoftExt))
- || TUK == TUK_Friend))
- return PrevTagDecl;
+ if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (Attr) {
+ // FIXME: Diagnose these attributes. For now, we create a new
+ // declaration to hold them.
+ } else if (TUK == TUK_Reference &&
+ (PrevTagDecl->getFriendObjectKind() ==
+ Decl::FOK_Undeclared ||
+ getOwningModule(PrevDecl) !=
+ PP.getModuleContainingLocation(KWLoc)) &&
+ SS.isEmpty()) {
+ // This declaration is a reference to an existing entity, but
+ // has different visibility from that entity: it either makes
+ // a friend visible or it makes a type visible in a new module.
+ // In either case, create a new declaration. We only do this if
+ // the declaration would have meant the same thing if no prior
+ // declaration were found, that is, if it was found in the same
+ // scope where we would have injected a declaration.
+ DeclContext *InjectedDC = CurContext;
+ while (!InjectedDC->isFileContext() &&
+ !InjectedDC->isFunctionOrMethod())
+ InjectedDC = InjectedDC->getParent();
+ if (!InjectedDC->getRedeclContext()->Equals(
+ PrevDecl->getDeclContext()->getRedeclContext()))
+ return PrevTagDecl;
+ // This is in the injected scope, create a new declaration.
+ } else {
+ return PrevTagDecl;
+ }
+ }
// Diagnose attempts to redefine a tag.
if (TUK == TUK_Definition) {
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 3f6c6b00d902..02091a7bd530 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -9470,6 +9470,10 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
if (Class->getDescribedClassTemplate())
return;
+ CallingConv ExpectedCallingConv = S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true);
+
+ CXXConstructorDecl *LastExportedDefaultCtor = nullptr;
for (Decl *Member : Class->decls()) {
auto *CD = dyn_cast<CXXConstructorDecl>(Member);
if (!CD) {
@@ -9481,7 +9485,25 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
continue;
}
- for (unsigned I = 0, E = CD->getNumParams(); I != E; ++I) {
+ CallingConv ActualCallingConv =
+ CD->getType()->getAs<FunctionProtoType>()->getCallConv();
+
+ // Skip default constructors with typical calling conventions and no default
+ // arguments.
+ unsigned NumParams = CD->getNumParams();
+ if (ExpectedCallingConv == ActualCallingConv && NumParams == 0)
+ continue;
+
+ if (LastExportedDefaultCtor) {
+ S.Diag(LastExportedDefaultCtor->getLocation(),
+ diag::err_attribute_dll_ambiguous_default_ctor) << Class;
+ S.Diag(CD->getLocation(), diag::note_entity_declared_at)
+ << CD->getDeclName();
+ return;
+ }
+ LastExportedDefaultCtor = CD;
+
+ for (unsigned I = 0; I != NumParams; ++I) {
// Skip any default arguments that we've already instantiated.
if (S.Context.getDefaultArgExprForConstructor(CD, I))
continue;
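
A sketch of the situation the new check diagnoses, assuming the Microsoft C++ ABI (e.g. clang-cl); two exported constructors that can both act as the default constructor would make the exported default-constructor closure ambiguous:

  struct __declspec(dllexport) Widget {
    Widget(int Flags = 0);          // usable as a default constructor
    Widget(void *Opaque = nullptr); // second candidate; expected to be rejected
  };
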
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index 65f10816924f..57a08b94f5e8 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -756,9 +756,9 @@ ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
BaseExpr = Result.get();
// Build the pseudo-object expression.
- return ObjCSubscriptRefExpr::Create(Context, BaseExpr, IndexExpr,
- Context.PseudoObjectTy, getterMethod,
- setterMethod, RB);
+ return new (Context) ObjCSubscriptRefExpr(
+ BaseExpr, IndexExpr, Context.PseudoObjectTy, VK_LValue, OK_ObjCSubscript,
+ getterMethod, setterMethod, RB);
}
ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 5dd835498a9a..f66e2181b28f 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -1680,6 +1680,13 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
ErrorFound = true;
}
+ if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
+ isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
+ OC->getNumForLoops()) {
+ Diag(OC->getLocStart(), diag::err_omp_ordered_simd)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ ErrorFound = true;
+ }
if (ErrorFound) {
ActOnCapturedRegionError();
return StmtError();
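
A sketch of the directive shape the new check rejects, assuming -fopenmp: an 'ordered' clause that carries a parameter is not allowed on a worksharing directive that is also a simd directive:

  void scale(int N, float *A) {
  #pragma omp for simd ordered(1) // expected to be diagnosed by the new check
    for (int I = 0; I < N; ++I)
      A[I] *= 2.0f;
  }
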
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 1caa94c9a458..e0c10e4479e2 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -543,6 +543,12 @@ namespace {
struct DFIParamWithArguments : DFIArguments {
TemplateParameter Param;
};
+ // Structure used by DeductionFailureInfo to store template argument
+ // information and the index of the problematic call argument.
+ struct DFIDeducedMismatchArgs : DFIArguments {
+ TemplateArgumentList *TemplateArgs;
+ unsigned CallArgIndex;
+ };
}
/// \brief Convert from Sema's representation of template deduction information
@@ -554,13 +560,14 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
DeductionFailureInfo Result;
Result.Result = static_cast<unsigned>(TDK);
Result.HasDiagnostic = false;
- Result.Data = nullptr;
switch (TDK) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ Result.Data = nullptr;
break;
case Sema::TDK_Incomplete:
@@ -568,6 +575,17 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
Result.Data = Info.Param.getOpaqueValue();
break;
+ case Sema::TDK_DeducedMismatch: {
+ // FIXME: Should allocate from normal heap so that we can free this later.
+ auto *Saved = new (Context) DFIDeducedMismatchArgs;
+ Saved->FirstArg = Info.FirstArg;
+ Saved->SecondArg = Info.SecondArg;
+ Saved->TemplateArgs = Info.take();
+ Saved->CallArgIndex = Info.CallArgIndex;
+ Result.Data = Saved;
+ break;
+ }
+
case Sema::TDK_NonDeducedMismatch: {
// FIXME: Should allocate from normal heap so that we can free this later.
DFIArguments *Saved = new (Context) DFIArguments;
@@ -601,9 +619,6 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_FailedOverloadResolution:
Result.Data = Info.Expression;
break;
-
- case Sema::TDK_MiscellaneousDeductionFailure:
- break;
}
return Result;
@@ -623,6 +638,7 @@ void DeductionFailureInfo::Destroy() {
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
// FIXME: Destroy the data?
Data = nullptr;
@@ -657,6 +673,7 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
return TemplateParameter();
@@ -692,6 +709,9 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
case Sema::TDK_FailedOverloadResolution:
return nullptr;
+ case Sema::TDK_DeducedMismatch:
+ return static_cast<DFIDeducedMismatchArgs*>(Data)->TemplateArgs;
+
case Sema::TDK_SubstitutionFailure:
return static_cast<TemplateArgumentList*>(Data);
@@ -718,6 +738,7 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->FirstArg;
@@ -744,6 +765,7 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->SecondArg;
@@ -763,6 +785,14 @@ Expr *DeductionFailureInfo::getExpr() {
return nullptr;
}
+llvm::Optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
+ if (static_cast<Sema::TemplateDeductionResult>(Result) ==
+ Sema::TDK_DeducedMismatch)
+ return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
+
+ return llvm::None;
+}
+
void OverloadCandidateSet::destroyCandidates() {
for (iterator i = begin(), e = end(); i != e; ++i) {
for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
@@ -9397,6 +9427,23 @@ static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
return;
}
+ case Sema::TDK_DeducedMismatch: {
+ // Format the template argument list into the argument string.
+ SmallString<128> TemplateArgString;
+ if (TemplateArgumentList *Args =
+ DeductionFailure.getTemplateArgumentList()) {
+ TemplateArgString = " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
+ }
+
+ S.Diag(Templated->getLocation(), diag::note_ovl_candidate_deduced_mismatch)
+ << (*DeductionFailure.getCallArgIndex() + 1)
+ << *DeductionFailure.getFirstArg() << *DeductionFailure.getSecondArg()
+ << TemplateArgString;
+ break;
+ }
+
case Sema::TDK_NonDeducedMismatch: {
// FIXME: Provide a source location to indicate what we couldn't match.
TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
@@ -9686,6 +9733,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
return 2;
case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_MiscellaneousDeductionFailure:
return 3;
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 0d6e0f8e41b0..11a4f8bfa85c 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -589,10 +589,8 @@ ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
QualType T = Result.get()->getType();
- // For now, reject dependent types.
if (T->isDependentType()) {
- Diag(Id.getLocStart(), diag::err_asm_incomplete_type) << T;
- return ExprError();
+ return Result;
}
// Any sort of function type is fine.
@@ -674,12 +672,23 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
}
ExprResult
-Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member, unsigned &Offset,
+Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
llvm::InlineAsmIdentifierInfo &Info,
SourceLocation AsmLoc) {
Info.clear();
- const RecordType *RT = E->getType()->getAs<RecordType>();
+ QualType T = E->getType();
+ if (T->isDependentType()) {
+ DeclarationNameInfo NameInfo;
+ NameInfo.setLoc(AsmLoc);
+ NameInfo.setName(&Context.Idents.get(Member));
+ return CXXDependentScopeMemberExpr::Create(
+ Context, E, T, /*IsArrow=*/false, AsmLoc, NestedNameSpecifierLoc(),
+ SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
+ }
+
+ const RecordType *RT = T->getAs<RecordType>();
// FIXME: Diagnose this as field access into a scalar type.
if (!RT)
return ExprResult();
@@ -697,9 +706,6 @@ Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member, unsigned &Offset,
if (!FD)
return ExprResult();
- Offset = (unsigned)Context.toCharUnitsFromBits(Context.getFieldOffset(FD))
- .getQuantity();
-
// Make an Expr to thread through OpDecl.
ExprResult Result = BuildMemberReferenceExpr(
E, E->getType(), AsmLoc, /*IsArrow=*/false, CXXScopeSpec(),
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index de04c8a7f042..cd54920b08cf 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -2954,8 +2954,12 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
continue;
QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
- if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA))
- return Sema::TDK_SubstitutionFailure;
+ if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA)) {
+ Info.FirstArg = TemplateArgument(DeducedA);
+ Info.SecondArg = TemplateArgument(OriginalArg.OriginalArgType);
+ Info.CallArgIndex = OriginalArg.ArgIdx;
+ return TDK_DeducedMismatch;
+ }
}
}
@@ -4887,19 +4891,23 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
break;
case Type::DependentTemplateSpecialization: {
+ // C++14 [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // -- The nested-name-specifier of a type that was specified using a
+ // qualified-id
+ //
+ // C++14 [temp.deduct.type]p6:
+ // When a type name is specified in a way that includes a non-deduced
+ // context, all of the types that comprise that type name are also
+ // non-deduced.
+ if (OnlyDeduced)
+ break;
+
const DependentTemplateSpecializationType *Spec
= cast<DependentTemplateSpecializationType>(T);
- if (!OnlyDeduced)
- MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
- OnlyDeduced, Depth, Used);
- // C++0x [temp.deduct.type]p9:
- // If the template argument list of P contains a pack expansion that is not
- // the last template argument, the entire template argument list is a
- // non-deduced context.
- if (OnlyDeduced &&
- hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
- break;
+ MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
+ OnlyDeduced, Depth, Used);
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
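
An illustration of the rule quoted in the new comment: every type named through the qualified-id is a non-deduced context, so the template arguments of such a specialization cannot be deduced from a call and must be supplied explicitly (the types below are made up):

  template <typename T> struct Outer {
    template <typename U> struct Inner {};
  };

  // Outer<T>::Inner<U> is named through a qualified-id, so neither T nor U
  // participates in deduction here.
  template <typename T, typename U>
  void consume(typename Outer<T>::template Inner<U>) {}

  void test() {
    consume<int, char>(Outer<int>::Inner<char>()); // OK: explicit arguments
    // consume(Outer<int>::Inner<char>());         // would fail: nothing deduced
  }
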
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 7d88a31f44a7..a279475eeafb 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -4699,6 +4699,13 @@ bool ASTReader::ParseLanguageOptions(const RecordData &Record,
}
LangOpts.CommentOpts.ParseAllComments = Record[Idx++];
+ // OpenMP offloading options.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ LangOpts.OMPTargetTriples.push_back(llvm::Triple(ReadString(Record, Idx)));
+ }
+
+ LangOpts.OMPHostIRFile = ReadString(Record, Idx);
+
return Listener.ReadLanguageOptions(LangOpts, Complain,
AllowCompatibleDifferences);
}
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 4082dec48c0a..bc678aff7861 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -987,8 +987,10 @@ void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
assert(NumElements == E->getNumElements() && "Wrong number of elements");
bool HasPackExpansions = Record[Idx++];
assert(HasPackExpansions == E->HasPackExpansions &&"Pack expansion mismatch");
- ObjCDictionaryLiteral::KeyValuePair *KeyValues = E->getKeyValues();
- ObjCDictionaryLiteral::ExpansionData *Expansions = E->getExpansionData();
+ ObjCDictionaryLiteral::KeyValuePair *KeyValues =
+ E->getTrailingObjects<ObjCDictionaryLiteral::KeyValuePair>();
+ ObjCDictionaryLiteral::ExpansionData *Expansions =
+ E->getTrailingObjects<ObjCDictionaryLiteral::ExpansionData>();
for (unsigned I = 0; I != NumElements; ++I) {
KeyValues[I].Key = Reader.ReadSubExpr();
KeyValues[I].Value = Reader.ReadSubExpr();
@@ -1445,7 +1447,8 @@ void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
unsigned NumObjects = Record[Idx++];
assert(NumObjects == E->getNumObjects());
for (unsigned i = 0; i != NumObjects; ++i)
- E->getObjectsBuffer()[i] = ReadDeclAs<BlockDecl>(Record, Idx);
+ E->getTrailingObjects<BlockDecl *>()[i] =
+ ReadDeclAs<BlockDecl>(Record, Idx);
E->SubExpr = Reader.ReadSubExpr();
}
@@ -1541,7 +1544,7 @@ void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
E->Loc = Range.getBegin();
E->RParenLoc = Range.getEnd();
- TypeSourceInfo **Args = E->getTypeSourceInfos();
+ TypeSourceInfo **Args = E->getTrailingObjects<TypeSourceInfo *>();
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
Args[I] = GetTypeSourceInfo(Record, Idx);
}
@@ -1589,7 +1592,7 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
E->Pack = Reader.ReadDeclAs<NamedDecl>(F, Record, Idx);
if (E->isPartiallySubstituted()) {
assert(E->Length == NumPartialArgs);
- for (auto *I = reinterpret_cast<TemplateArgument *>(E + 1),
+ for (auto *I = E->getTrailingObjects<TemplateArgument>(),
*E = I + NumPartialArgs;
I != E; ++I)
new (I) TemplateArgument(Reader.ReadTemplateArgument(F, Record, Idx));
@@ -1624,7 +1627,7 @@ void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
E->NumParameters = Record[Idx++];
E->ParamPack = ReadDeclAs<ParmVarDecl>(Record, Idx);
E->NameLoc = ReadSourceLocation(Record, Idx);
- ParmVarDecl **Parms = reinterpret_cast<ParmVarDecl**>(E+1);
+ ParmVarDecl **Parms = E->getTrailingObjects<ParmVarDecl *>();
for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
Parms[i] = ReadDeclAs<ParmVarDecl>(Record, Idx);
}
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 128935c5c73b..0f50d7a42eab 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -1323,6 +1323,13 @@ uint64_t ASTWriter::WriteControlBlock(Preprocessor &PP,
}
Record.push_back(LangOpts.CommentOpts.ParseAllComments);
+ // OpenMP offloading options.
+ Record.push_back(LangOpts.OMPTargetTriples.size());
+ for (auto &T : LangOpts.OMPTargetTriples)
+ AddString(T.getTriple(), Record);
+
+ AddString(LangOpts.OMPHostIRFile, Record);
+
Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
// Target options.
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 5dd28320f88f..f216f696ef65 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -34,8 +34,7 @@ class DereferenceChecker
mutable std::unique_ptr<BuiltinBug> BT_null;
mutable std::unique_ptr<BuiltinBug> BT_undef;
- void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C,
- bool IsBind = false) const;
+ void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C) const;
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
@@ -89,8 +88,31 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
}
}
+static const Expr *getDereferenceExpr(const Stmt *S, bool IsBind = false) {
+ const Expr *E = nullptr;
+
+ // Walk through lvalue casts to get the original expression
+ // that syntactically caused the load.
+ if (const Expr *expr = dyn_cast<Expr>(S))
+ E = expr->IgnoreParenLValueCasts();
+
+ if (IsBind) {
+ const VarDecl *VD;
+ const Expr *Init;
+ std::tie(VD, Init) = parseAssignment(S);
+ if (VD && Init)
+ E = Init;
+ }
+ return E;
+}
+
+static bool suppressReport(const Expr *E) {
+ // Do not report dereferences on memory in non-default address spaces.
+ return E->getType().getQualifiers().hasAddressSpace();
+}
+
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
- CheckerContext &C, bool IsBind) const {
+ CheckerContext &C) const {
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
@@ -106,19 +128,6 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
SmallVector<SourceRange, 2> Ranges;
- // Walk through lvalue casts to get the original expression
- // that syntactically caused the load.
- if (const Expr *expr = dyn_cast<Expr>(S))
- S = expr->IgnoreParenLValueCasts();
-
- if (IsBind) {
- const VarDecl *VD;
- const Expr *Init;
- std::tie(VD, Init) = parseAssignment(S);
- if (VD && Init)
- S = Init;
- }
-
switch (S->getStmtClass()) {
case Stmt::ArraySubscriptExprClass: {
os << "Array access";
@@ -209,8 +218,11 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// The explicit NULL case.
if (nullState) {
if (!notNullState) {
- reportBug(nullState, S, C);
- return;
+ const Expr *expr = getDereferenceExpr(S);
+ if (!suppressReport(expr)) {
+ reportBug(nullState, expr, C);
+ return;
+ }
}
// Otherwise, we have the case where the location could either be
@@ -248,8 +260,11 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (StNull) {
if (!StNonNull) {
- reportBug(StNull, S, C, /*isBind=*/true);
- return;
+ const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
+ if (!suppressReport(expr)) {
+ reportBug(StNull, expr, C);
+ return;
+ }
}
// At this point the value could be either null or non-null.
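
A sketch of the code the suppression above targets, using Clang's address_space attribute; the macro name and address-space number are illustrative only:

  #define DEVICE_MEM __attribute__((address_space(1)))

  int readRegister(DEVICE_MEM int *Reg) {
    if (Reg == 0)
      return *Reg; // previously flagged as a null dereference; now suppressed
    return *Reg;
  }
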
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 713d9fe285a5..ce2c19409dc1 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -2508,6 +2508,16 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
return true;
}
+ if (FName == "postEvent" &&
+ FD->getQualifiedNameAsString() == "QCoreApplication::postEvent") {
+ return true;
+ }
+
// Handle cases where we know a buffer's /address/ can escape.
// Note that the above checks handle some special cases where we know that
// even though the address escapes, it's still our responsibility to free the
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index ec1310d91814..cf1e0a6a656c 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -1542,6 +1542,16 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
}
}
+  // The analyzer issues a false positive when the constructor of
+  // std::__independent_bits_engine from <algorithm> is used.
+ if (const CXXConstructorDecl *MD = dyn_cast<CXXConstructorDecl>(D)) {
+ const CXXRecordDecl *CD = MD->getParent();
+ if (CD->getName() == "__independent_bits_engine") {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+ }
+ }
+
// The analyzer issues a false positive on
// std::basic_string<uint8_t> v; v.push_back(1);
// and